// Copyright Microsoft and CHERIoT Contributors.
// SPDX-License-Identifier: MIT
#include <cdefs.h>
// memcpy is exposed as a libcall in the standard library headers but we want
// to ensure that our version is called directly and not exposed to anything
// else.
#undef __cheri_libcall
#define __cheri_libcall
#include <string.h>
#include "../switcher/tstack.h"
#include "constants.h"
#include "debug.hh"
#include "defines.h"
#include "types.h"
#include <cheri.hh>
#include <platform-uart.hh>
#include <priv/riscv.h>
#include <riscvreg.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
using namespace CHERI;
namespace
{
/**
* Round up to a multiple of `Multiple`, which must be a power of two.
*/
template<size_t Multiple>
constexpr size_t round_up(size_t value)
{
static_assert((Multiple & (Multiple - 1)) == 0,
"Multiple must be a power of two");
return (value + Multiple - 1) & -Multiple;
}
static_assert(round_up<16>(15) == 16);
static_assert(round_up<16>(28) == 32);
static_assert(round_up<8>(17) == 24);
__BEGIN_DECLS
static_assert(CheckSize<CHERIOT_LOADER_TRUSTED_STACK_SIZE,
sizeof(TrustedStackGeneric<0>)>::value,
"Boot trusted stack sizes do not match.");
// It must also be aligned sufficiently for trusted stacks, so ensure that
// we've captured that requirement above.
static_assert(alignof(TrustedStack) <= 16);
__END_DECLS
static_assert(
CheckSize<sizeof(ThreadLoaderInfo), BOOT_THREADINFO_SZ>::value);
/**
* Reserved sealing types.
*/
enum SealingType
{
/**
* 0 represents unsealed.
*/
Unsealed = 0,
/**
* Sentry that inherits interrupt status.
*/
SentryInheriting,
/// Alternative name: the default sentry type.
Sentry = SentryInheriting,
/**
* Sentry that disables interrupts on calls.
*/
SentryDisabling,
/**
* Sentry that enables interrupts on calls.
*/
SentryEnabling,
/**
* Marker for the first sealing type that's valid for data capabilities.
*/
FirstDataSealingType = 9,
/**
* The sealing type used for sealed export table entries.
*/
SealedImportTableEntries = FirstDataSealingType,
/**
* The compartment switcher has a sealing type for the trusted stack.
*
* This must be the second data sealing type so that we can also permit
* the switcher to unseal sentries and export table entries.
*/
SealedTrustedStacks,
/**
* The scheduler has a sealing type for waitable objects.
*/
Scheduler,
/**
* The allocator has a sealing type for the software sealing mechanism.
*/
Allocator,
/**
* The first sealing key that is reserved for use by the allocator's
* software sealing mechanism and used for static sealing types.
*/
FirstStaticSoftware = 16,
/**
* The first sealing key in the space that the allocator will
* dynamically allocate for sealing types.
*/
FirstDynamicSoftware = 0x1000000,
};
// We currently have a 3-bit hardware otype, with different sealing spaces
// for code and data capabilities, giving the range 0-0xf reserved for
// hardware use. Assert that we're not using more than we need (two in the
// enum are outside of the hardware space).
static_assert(magic_enum::enum_count<SealingType>() <= 10,
"Too many sealing types reserved for a 3-bit otype field");
constexpr auto StoreLPerm = Root::Permissions<Root::Type::RWStoreL>;
/// PCC permissions for the switcher.
constexpr auto SwitcherPccPermissions =
Root::Permissions<Root::Type::Execute>;
/// PCC permissions for unprivileged compartments
constexpr auto UserPccPermissions =
Root::Permissions<Root::Type::Execute>.without(
Permission::AccessSystemRegisters);
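/**
 * Round `x` up to the next multiple of `align`, which must be a power of
 * two. Thin wrapper over the compiler builtin.
 */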
template<typename T, typename U>
T align_up(T x, U align)
{
return __builtin_align_up(x, align);
}
/**
* Returns a capability of type T* derived from the root specified by Type,
* with the specified permissions. The start and length are given as
* arguments.
*/
template<typename T = void,
Root::Type Type = Root::Type::RWGlobal,
PermissionSet Permissions = Root::Permissions<Type>,
bool Precise = true>
Capability<T> build(ptraddr_t start, size_t length)
{
return static_cast<T *>(
Root::build_from_root<Type, Permissions, Precise>(start, length));
}
/**
* Builds a capability with bounds to access a single object of type `T`,
* which starts at address `start`. The root and permissions are specified
* as template arguments.
*/
template<typename T,
Root::Type Type = Root::Type::RWGlobal,
PermissionSet Permissions = Root::Permissions<Type>>
Capability<T> build(ptraddr_t start)
{
return build<T, Type, Permissions>(start, sizeof(T));
}
/**
* Builds a capability with bounds specified by `start` and `length`, which
* points to an object of type `T`, at address `address`. This is derived
* from the root and with the permissions given as template arguments.
*/
template<typename T = void,
Root::Type Type = Root::Type::RWGlobal,
PermissionSet Permissions = Root::Permissions<Type>>
Capability<T> build(ptraddr_t start, size_t length, ptraddr_t address)
{
Capability<T> ret{static_cast<T *>(
Root::build_from_root<Type, Permissions>(start, length))};
ret.address() = address;
return ret;
}
/**
* Build a capability covering the address range given by `range`: any
* object satisfying `RawAddressRange` (exposing `start()` and `size()`).
*/
template<typename T = void,
Root::Type Type = Root::Type::RWGlobal,
PermissionSet Permissions = Root::Permissions<Type>,
bool Precise = true>
Capability<T> build(auto &&range) requires(RawAddressRange<decltype(range)>)
{
return build<T, Type, Permissions, Precise>(range.start(),
range.size());
}
/**
* Build a capability to an object of type `T`, bounded by the given range,
* with its address set to `address`.
*/
template<typename T = void,
Root::Type Type = Root::Type::RWGlobal,
PermissionSet Permissions = Root::Permissions<Type>>
Capability<T>
build(auto &&range,
ptraddr_t address) requires(RawAddressRange<decltype(range)>)
{
return build<T, Type, Permissions>(
range.start(), range.size(), address);
}
/**
* Build the PCC for a compartment. Permissions can be overridden for more
* or less privileged compartments.
*/
template<const PermissionSet Permissions = UserPccPermissions>
Capability<void> build_pcc(const auto &compartment)
{
return build<void, Root::Type::Execute, Permissions>(compartment.code);
}
/**
* Build a capability to a compartment's globals. Permissions can be
* overridden via a template parameter for non-default options.
*
* By default, this function returns a biased $cgp value: the address
* points to the middle of the range. This can be disabled by passing
* `false` as the `bias` argument.
*/
Capability<void> build_cgp(const auto &compartment, bool bias = true)
{
auto cgp = build<void, Root::Type::RWGlobal>(compartment.data);
if (bias)
{
cgp.address() += (compartment.data.size() / 2);
}
return cgp;
}
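// Worked example (illustrative numbers, not taken from a real image): for a
// globals section starting at 0x20040000 with size 0x800, the biased $cgp
// address is 0x20040400, which roughly doubles the range of globals
// reachable with a fixed-size cgp-relative immediate offset.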
/**
* Returns a sealing capability to use for statically allocated sealing
* keys.
*/
uint16_t allocate_static_sealing_key()
{
static uint16_t nextValue = FirstStaticSoftware;
// We currently stash the allocated key value in the export table. We
// could expand this a bit if we were a bit more clever in how we used
// that space, but 2^16 static sealing keys will require over 768 KiB
// of SRAM to store in the firmware, which seems excessive.
Debug::Invariant(nextValue < std::numeric_limits<uint16_t>::max(),
"Out of static sealing keys");
return nextValue++;
}
/**
* Returns a sealing capability in the sealing space with the specified
* type.
*/
void *build_static_sealing_key(uint16_t type)
{
static void *staticSealingRoot;
Debug::Invariant(type >= FirstStaticSoftware,
"{} is not a valid software sealing key",
type);
if (staticSealingRoot == nullptr)
{
staticSealingRoot =
build<void,
Root::Type::Seal,
PermissionSet{Permission::Global,
Permission::Seal,
Permission::Unseal,
Permission::User0}>(0, FirstDynamicSoftware);
}
Capability next = staticSealingRoot;
next.address() = type;
next.bounds() = 1;
Debug::Invariant(
next.is_valid(), "Invalid static sealing key {}", next);
return next;
}
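// Example: `build_static_sealing_key(16)` (the first static software type)
// returns a seal/unseal capability whose one-element bounds cover exactly
// otype 16.
/**
 * Seal the entry point `ptr` as a sentry whose interrupt behaviour is given
 * by `status`, picking the corresponding hardware sentry otype.
 */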
template<typename T>
T *seal_entry(Capability<T> ptr, InterruptStatus status)
{
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wc99-designator"
constexpr SealingType Sentries[] = {
[int(InterruptStatus::Enabled)] = SentryEnabling,
[int(InterruptStatus::Disabled)] = SentryDisabling,
[int(InterruptStatus::Inherited)] = SentryInheriting};
#pragma clang diagnostic pop
Debug::Invariant(
unsigned(status) < 3, "Invalid interrupt status {}", int(status));
size_t otype = size_t{Sentries[int(status)]};
void *key = build<void, Root::Type::Seal>(otype, 1);
return ptr.seal(key);
}
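// Usage sketch: `seal_entry(pcc, InterruptStatus::Disabled)` yields a
// sentry that switches to the interrupts-disabled state when jumped to,
// using the `SentryDisabling` otype from the table above.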
/**
* Helper to determine whether an object, given by a start address and size,
* is completely contained within a specified range.
*/
bool contains(const auto &range,
ptraddr_t addr,
size_t size) requires(RawAddressRange<decltype(range)>)
{
return (range.start() <= addr) &&
(range.start() + range.size() >= addr + size);
}
/**
* Helper to determine whether an address is within a range. The template
* parameter specifies the type that the object is expected to be. The
* object must be completely contained within the range.
*/
template<typename T = char>
bool contains(const auto &range,
ptraddr_t addr) requires(RawAddressRange<decltype(range)>)
{
return contains(range, addr, sizeof(T));
}
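// Example: `contains<ExportEntry>(compartment.exportTable, addr)` checks
// that a whole `ExportEntry` starting at `addr` fits inside the export
// table's address range; this pattern is used throughout the loader below.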
/**
* Helper class representing a range (which can be used with range-based
* for loops) built from pointers to contiguous memory.
*/
template<typename T>
class ContiguousPtrRange
{
/**
* Pointer to the first element.
*/
T *start;
/**
* Pointer to one past the last element.
*/
T *finish;
public:
/**
* Constructor, takes pointers to the beginning and end of an array.
*/
ContiguousPtrRange(T *s, T *e) : start(s), finish(e) {}
/**
* Returns a pointer to the start.
*/
T *begin()
{
return start;
}
/**
* Returns a pointer to the end.
*/
T *end()
{
return finish;
}
};
/**
* Build a range for use with range-based for loops iterating over objects
* of type `T` from a virtual address range.
*/
template<typename T, bool Precise = true>
ContiguousPtrRange<T>
build_range(const auto &range) requires(RawAddressRange<decltype(range)>)
{
Capability<T> start = build<T,
Root::Type::RWGlobal,
Root::Permissions<Root::Type::RWGlobal>,
Precise>(range);
Capability<T> end = start;
end.address() += range.size();
return {start, end};
}
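// Usage sketch (mirroring the cap-reloc loop later in this file):
//   for (auto &reloc : build_range<CapReloc, false>(capRelocsSection)) ...
// iterates over `CapReloc` records stored in that address range.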
/**
* The sealing key for the switcher, used to seal all jump targets in the
* import tables.
*/
Capability<void> switcherKey;
/**
* The sealing key for sealing trusted stacks.
*/
Capability<void> trustedStackKey;
/**
* Find an export table target. This looks for the address given by `entry`
* within all of the export tables in the image. Entries that do not refer
* to an exported function are resolved as MMIO regions or statically
* sealed objects instead.
*/
void *find_export_target(const ImgHdr &image,
const auto &sourceCompartment,
ImportEntry &entry)
{
// Build an MMIO capability.
auto buildMMIO = [&]() {
Debug::log("Building mmio capability {} + {} (permissions: {})",
entry.address,
entry.size(),
entry.permissions());
if constexpr (std::is_same_v<
std::remove_cvref_t<decltype(sourceCompartment)>,
ImgHdr::PrivilegedCompartment>)
{
if (&sourceCompartment == &image.allocator())
{
if (entry.address == LA_ABS(__export_mem_heap) &&
(entry.size() == (LA_ABS(__export_mem_heap_end) -
LA_ABS(__export_mem_heap))))
{
ptraddr_t end = entry.address + entry.size();
Debug::log(
"Rounding heap ({}--{}) region", entry.address, end);
size_t sizeMask =
__builtin_cheri_representable_alignment_mask(
entry.size());
Debug::log("Applying mask {} to entry.size", sizeMask);
size_t roundedSize = entry.size() & sizeMask;
ptraddr_t roundedBase = end - roundedSize;
Debug::log("Rounding heap entry.size down from {} to "
"{} (rounded up "
"to {})",
entry.size(),
roundedSize,
__builtin_cheri_round_representable_length(
entry.size()));
Debug::Invariant(
(end & ~sizeMask) == 0,
"End of heap ({}) is not sufficiently aligned ({})",
end,
sizeMask);
Debug::log(
"Assigning rounded heap (in {}--{}) to the allocator",
roundedBase,
roundedBase + roundedSize);
Debug::Invariant(
roundedBase >= entry.address,
"Rounding heap base ({}) up to {} rounded down!",
entry.address,
roundedBase);
auto heap = build(roundedBase, roundedSize);
Debug::log("Heap: {}", heap);
Debug::Assert(heap.is_valid(),
"Heap capability rounding went wrong "
"somehow ({} is untagged)",
heap);
return heap;
}
}
}
// MMIO regions should be within the range of the MMIO space. As a
// special case (to be removed at some point when we have a generic
// way of setting up shared objects), allow the hazard-pointer
// region and hazard epoch to be excluded from this test.
Debug::Invariant(
((entry.address >= LA_ABS(__mmio_region_start)) &&
(entry.address + entry.size() <= LA_ABS(__mmio_region_end))) ||
((entry.address == LA_ABS(__export_mem_allocator_epoch)) &&
(entry.address + entry.size() ==
LA_ABS(__export_mem_allocator_epoch_end))) ||
((entry.address == LA_ABS(__export_mem_hazard_pointers)) &&
(entry.address + entry.size() ==
LA_ABS(__export_mem_hazard_pointers_end))),
"{}--{} is not in the MMIO range ({}--{}) or the hazard pointer "
"range ({}--{}) or the allocator epoch range ({}--{})",
entry.address,
entry.address + entry.size(),
LA_ABS(__mmio_region_start),
LA_ABS(__mmio_region_end),
LA_ABS(__export_mem_hazard_pointers),
LA_ABS(__export_mem_hazard_pointers_end),
LA_ABS(__export_mem_allocator_epoch),
LA_ABS(__export_mem_allocator_epoch_end));
auto ret = build(entry.address, entry.size());
// Remove any permissions that shouldn't be held here.
ret.permissions() &= entry.permissions();
return ret;
};
// Build an export table entry for the given compartment.
auto buildExportEntry = [&](const auto &compartment) {
auto exportEntry = build(compartment.exportTable, entry.address)
.template cast<ExportEntry>();
auto interruptStatus = exportEntry->interrupt_status();
Debug::Invariant((interruptStatus == InterruptStatus::Enabled) ||
(interruptStatus == InterruptStatus::Disabled),
"Functions exported from compartments must have "
"an explicit interrupt posture");
return build(compartment.exportTable, entry.address)
.seal(switcherKey);
};
// If the low bit is 1, then this is either an MMIO region or direct
// call via a sentry. The latter case covers code in shared
// (stateless) libraries and explicit interrupt-toggling sentries for
// code within the current compartment.
// First check if it's a sentry call.
if (entry.address & 1)
{
// Clear the low bit to give the real address.
auto possibleLibcall = entry.address & ~1U;
// Helper to create the target of a library call.
auto createLibCall = [&](Capability<void> pcc) {
// Libcall export table entries are just sentry capabilities to
// the real target. We need to get the address and interrupt
// status of the function from the target's export table and
// then construct a sentry of the correct kind derived from the
// compartment's PCC.
auto ent = build<ExportEntry>(possibleLibcall);
pcc.address() += ent->functionStart;
return seal_entry(pcc, ent->interrupt_status());
};
// If this is a libcall, set it up
for (auto &lib : image.libraries())
{
if (contains<ExportEntry>(lib.exportTable, possibleLibcall))
{
// TODO: Library export tables are not used after the
// loader has run, we could move them to the end of the
// image and make that space available for the heap.
return createLibCall(build_pcc(lib));
}
}
for (auto &compartment : image.privilegedCompartments)
{
if (!compartment.is_privileged_library())
{
continue;
}
if (contains<ExportEntry>(compartment.exportTable,
possibleLibcall))
{
// TODO: Privileged library export tables should be moved
// to the end of the image as well.
return createLibCall(build_pcc(compartment));
}
}
// The switcher is a special case: it needs a richer set of
// permissions (access to system registers) than other compartments,
// but is exposed as a library.
if (contains<ExportEntry>(image.switcher.exportTable,
possibleLibcall))
{
auto ent = build<ExportEntry>(possibleLibcall);
auto pcc =
build<void, Root::Type::Execute, SwitcherPccPermissions>(
image.switcher.code);
pcc.address() += ent->functionStart;
return seal_entry(pcc, ent->interrupt_status());
}
// We also use the library calling convention for local callbacks,
// so see if this points to our own export table.
if (contains<ExportEntry>(sourceCompartment.exportTable,
possibleLibcall))
{
return createLibCall(build_pcc(sourceCompartment));
}
// Otherwise this is an MMIO space entry (we allow byte-granularity
// delegation of MMIO objects, so a low bit of 1 might be a
// coincidence).
return buildMMIO();
}
{
if (contains(
sourceCompartment.sealedObjects, entry.address, entry.size()))
{
auto sealingType =
build<uint32_t,
Root::Type::RWGlobal,
PermissionSet{Permission::Load, Permission::Store}>(
entry.address);
// Is the software sealing type owned by the scheduler? If so,
// we're going to seal the object with the scheduler's sealing
// type, not the allocator's. This lets the scheduler export
// software-defined capabilities without adding the allocator
// to the TCB for availability.
bool isSchedulerObject = false;
// TODO: This currently places a restriction that data memory
// can't be in the low 64 KiB of the address space. That may be
// too restrictive. If we haven't visited this sealed object
// yet, then we should update its first word to point to the
// sealing type.
if (*sealingType >
std::numeric_limits<
decltype(ExportEntry::functionStart)>::max())
{
auto typeAddress = *sealingType;
auto findExport = [&](auto &compartment) {
if (contains<ExportEntry>(compartment.exportTable,
typeAddress))
{
auto exportEntry = build<ExportEntry>(
compartment.exportTable, typeAddress);
Debug::Invariant(
exportEntry->is_sealing_type(),
"Sealed object points to invalid sealing type");
*sealingType = exportEntry->functionStart;
return true;
}
return false;
};
bool found = findExport(image.allocator());
if (!found && findExport(image.scheduler()))
{
found = true;
isSchedulerObject = true;
}
for (auto &compartment : image.compartments())
{
if (found)
{
break;
}
found = findExport(compartment);
}
Debug::Invariant(*sealingType != typeAddress,
"Invalid sealed object {}",
typeAddress);
}
Capability sealedObject = build(entry.address, entry.size());
// Seal with the allocator's sealing key
sealedObject.seal(build<void, Root::Type::Seal>(
isSchedulerObject ? Scheduler : Allocator, 1));
Debug::log("Static sealed object: {}", sealedObject);
return sealedObject;
}
}
for (auto &compartment : image.privilegedCompartments)
{
if (contains<ExportEntry>(compartment.exportTable, entry.address))
{
return buildExportEntry(compartment);
}
}
for (auto &compartment : image.compartments())
{
if (contains<ExportEntry>(compartment.exportTable, entry.address))
{
return buildExportEntry(compartment);
}
}
return buildMMIO();
}
/**
* As a first pass, scan the import table of this compartment and resolve
* any static sealing types.
*/
void populate_static_sealing_keys(const ImgHdr &image,
const auto &compartment)
{
if (compartment.exportTable.size() == 0)
{
return;
}
const auto &importTable = compartment.import_table();
if (importTable.size() == 0)
{
return;
}
// The import table might not have strongly aligned bounds and so we
// are happy with an imprecise capability here.
auto impPtr = build<ImportTable,
Root::Type::RWGlobal,
Root::Permissions<Root::Type::RWGlobal>,
false>(importTable);
// FIXME: This should use a range-based for loop
for (int i = 0; i < (importTable.size() / sizeof(void *)) - 1; i++)
{
ptraddr_t importAddr = impPtr->imports[i].address;
size_t importSize = impPtr->imports[i].size();
// Static sealing key imports have a size of zero; skip anything else.
if (importSize != 0)
{
continue;
}
// If the low bit is 1, it's either a library import or an MMIO
// import. Skip it either way.
if (importAddr & 1)
{
continue;
}
// If this points anywhere other than the current compartment's
// export table, it isn't a sealing capability entry.
if (!contains(compartment.exportTable, importAddr))
{
continue;
}
// Build an export table entry for the given compartment.
auto exportEntry =
build<ExportEntry>(compartment.exportTable, importAddr);
// If the export entry isn't a sealing type, this is not a
// reference to a sealing capability.
if (!exportEntry->is_sealing_type())
{
continue;
}
Debug::Invariant(exportEntry->functionStart == 0,
"Two import entries point to the same export "
"entry for a sealing key {}",
exportEntry);
// Allocate a new sealing key type.
exportEntry->functionStart = allocate_static_sealing_key();
Debug::log("Creating sealing key {}", exportEntry->functionStart);
// Build the sealing key corresponding to that type.
impPtr->imports[i].pointer =
build_static_sealing_key(exportEntry->functionStart);
}
}
/**
* Populate the import table for `sourceCompartment`, resolving each entry
* to an export-table target, library sentry, MMIO capability, or sealed
* object. The `switcher` capability is stored in the import table's
* switcher slot.
*/
void populate_imports(const ImgHdr &image,
const auto &sourceCompartment,
void *switcher)
{
const auto &importTable = sourceCompartment.import_table();
if (importTable.size() == 0)
{
return;
}
Debug::log("Import table: {}, {} bytes",
importTable.start(),
importTable.size());
// The import table might not have strongly aligned bounds and so we
// are happy with an imprecise capability here.
auto importTablePointer = build<ImportTable,
Root::Type::RWStoreL,
Root::Permissions<Root::Type::RWStoreL>,
false>(importTable);
importTablePointer->switcher = switcher;
// FIXME: This should use a range-based for loop
for (int i = 0; i < (importTable.size() / sizeof(void *)) - 1; i++)
{
// If this is a sealing key then we will have initialised it
// already, skip it now.
if (Capability{importTablePointer->imports[i].pointer}.is_valid())
{
Debug::log("Skipping sealing type import");
continue;
}
importTablePointer->imports[i].pointer = find_export_target(
image, sourceCompartment, importTablePointer->imports[i]);
}
}
/**
* Construct the boot threads.
*/
void boot_threads_create(const ImgHdr &image, ThreadLoaderInfo *threadInfo)
{
// Two hazard pointers per thread. More makes free slow, fewer is hard
// to use.
static constexpr size_t HazardPointersPerThread = 2;
Capability<void *> hazardPointers =
build<void *,
Root::Type::RWGlobal,
PermissionSet{Permission::Store,
Permission::LoadStoreCapability}>(
LA_ABS(__export_mem_hazard_pointers),
LA_ABS(__export_mem_hazard_pointers_end) -
LA_ABS(__export_mem_hazard_pointers));
// Space per thread for hazard pointers.
static constexpr size_t HazardPointerSpace =
HazardPointersPerThread * sizeof(void *);
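// Each thread therefore owns a HazardPointerSpace-sized slice of the
// shared hazard-pointer region; thread i's slice starts at offset
// i * HazardPointerSpace (see the bounds-setting below).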
for (size_t i = 0; const auto &config : image.threads())
{
Debug::log("Creating thread {}", i);
auto findCompartment = [&]() -> auto &
{
for (auto &compartment : image.compartments())
{
Debug::log("Looking in export table {}+{}",
compartment.exportTable.start(),
compartment.exportTable.size());
if (contains(compartment.exportTable, config.entryPoint))
{
return compartment;
}
}
Debug::Invariant(
false, "Compartment entry point is not a valid export");
__builtin_unreachable();
};
const auto &compartment = findCompartment();
Debug::log("Creating thread in compartment {}", &compartment);
auto pcc = build_pcc(compartment);
pcc.address() +=
build<ExportEntry>(config.entryPoint)->functionStart;
Debug::log("New thread's pcc will be {}", pcc);
void *cgp = build_cgp(compartment);
Debug::log("New thread's cgp will be {}", cgp);
auto threadTStack =
build<TrustedStack,
Root::Type::TrustedStack,
Root::Permissions<Root::Type::TrustedStack>,
false>(config.trustedStack);
threadTStack->mepcc = pcc;
threadTStack->cgp = cgp;
// Stacks have store-local but not global permission.
auto stack =
build<void,
Root::Type::TrustedStack,
Root::Permissions<Root::Type::TrustedStack>.without(
Permission::Global),
false>(config.stack);
// Make sure that the thread's stack doesn't overlap the loader's
// stack (which will become the scheduler's stack).
Capability<void> csp = ({
register void *cspRegister asm("csp");
asm("" : "=C"(cspRegister));
cspRegister;
});
if (stack.top() <= csp.top())
{
Debug::Invariant(
stack.top() <= csp.base(),
"Thread stack {} for thread {} overlaps loader stack {}",
stack,
i,
csp);
}
if (stack.base() >= csp.base())
{
Debug::Invariant(
stack.base() >= csp.top(),
"Thread stack {} for thread {} overlaps loader stack {}",
stack,
i,
csp);
}
// Stack pointer points to the top of the stack.
stack.address() += config.stack.size();
Debug::log("Thread's stack is {}", stack);
threadTStack->csp = stack;
// Set up the space for hazard pointers.
Capability threadHazardPointers{hazardPointers};
threadHazardPointers.address() += (i * HazardPointerSpace);
threadHazardPointers.bounds() = HazardPointerSpace;
threadTStack->hazardPointers = threadHazardPointers;
// Enable previous level interrupts and set the previous exception
// level to M mode.
threadTStack->mstatus =
(priv::MSTATUS_MPIE |
(priv::MSTATUS_PRV_M << priv::MSTATUS_MPP_SHIFT));
#ifdef CONFIG_MSHWM
threadTStack->mshwm = stack.top();
threadTStack->mshwmb = stack.base();
#endif
// Set the thread ID that the switcher will return for this thread.
// This is indexed from 1, so 0 can be used to indicate the idle
// thread.
threadTStack->threadID = i + 1;
threadTStack->frameoffset = offsetof(TrustedStack, frames[1]);
threadTStack->frames[0].calleeExportTable =
build(compartment.exportTable);
Debug::log("Thread's trusted stack is {}", threadTStack);
threadTStack.seal(trustedStackKey);
threadInfo[i].trustedStack = threadTStack;
threadInfo[i].priority = config.priority;
i++;
}
Debug::log("Finished creating threads");
}
/**
* Resolve capability relocations.
*
* Note that this assumes that the firmware image was checked to ensure
* that no compartment ships with cap relocs that point to another
* compartment. This should be impossible due to how they flow through the
* linker but needs to be part of a static auditing pipeline.
*/
void populate_caprelocs(const ImgHdr &image)
{
// Helper to give the cap relocs section as a range.
struct
{
[[nodiscard]] ptraddr_t start() const
{
return LA_ABS(__cap_relocs);
}
[[nodiscard]] size_t size() const
{
return LA_ABS(__cap_relocs_end) - LA_ABS(__cap_relocs);
}
} capRelocsSection;
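// Sketch of what a record describes (field names as used in the loop
// below): `addr` is the location of a pointer-typed global, `base` is the
// address of its target, and `offset` is added to the derived capability's
// address; `is_function()` selects PCC-derived bounds.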
// Find the compartment or library whose code or data section contains
// the given address.
auto findCompartment = [&](ptraddr_t address) -> auto &
{
Debug::log("Capreloc address is {}", address);
for (auto &compartment : image.libraries_and_compartments())
{
if (contains(compartment.code, address) ||
contains(compartment.data, address))
{
return compartment;
}
}
Debug::Invariant(false, "Cannot find compartment for cap reloc");
__builtin_unreachable();
};
Debug::log("Populating cap relocs the insecure way {} + {}",
capRelocsSection.start(),
capRelocsSection.size());
for (auto &reloc : build_range<CapReloc, false>(capRelocsSection))
{
Debug::log(
"Capreloc address: {}, base: {}", reloc.addr, reloc.base);
// Find the compartment that this relocation applies to.
const auto &compartment = findCompartment(reloc.addr);
// Compartment's PCC, used to derive function pointers.
auto pcc = build_pcc(compartment);
// Compartment's PCC with execute dropped. Used for pointers to
// read-only globals.
auto ropcc = pcc;
ropcc.permissions() &=
pcc.permissions().without(Permission::Execute);
// Compartment's globals region, used to derive pointers to globals
// and to write to globals.
auto cgp = build_cgp(compartment, false);
Capability<void> locationRegion;
// Cap relocs for a compartment must point to that compartment's
// code or data regions.
if (contains(compartment.code, reloc.addr))
{
locationRegion =
build<void, Root::Type::RWGlobal>(compartment.code);
}
else if (contains(compartment.data, reloc.addr))
{
locationRegion = cgp;
}
// The location is the address of the reloc, bounded to the region.
Capability<void *> location{locationRegion.cast<void *>()};
location.address() = reloc.addr;
size_t offset = reloc.offset;
Capability<void> cap;
if (reloc.is_function())
{
// If this is a function pointer, use the bounds of that
// compartment's PCC.
// FIXME: In our ABI the linker should emit function pointer
// bounds to be the whole .pcc section, not a single function.
offset += reloc.base - compartment.code.start();
cap = pcc;
}
else
{
if (contains(compartment.code, reloc.base))
{
cap = ropcc;
}
else if (contains(compartment.data, reloc.base))
{
cap = cgp;
}
else
{
Debug::Invariant(