diff --git a/yarn-project/acir-simulator/src/avm/journal/journal.test.ts b/yarn-project/acir-simulator/src/avm/journal/journal.test.ts
index 58b0b3345b9..1588712e1c7 100644
--- a/yarn-project/acir-simulator/src/avm/journal/journal.test.ts
+++ b/yarn-project/acir-simulator/src/avm/journal/journal.test.ts
@@ -29,7 +29,7 @@ describe('journal', () => {
       journal.writeStorage(contractAddress, key, value);
 
       const journalUpdates: JournalData = journal.flush();
-      expect(journalUpdates.storageWrites.get(contractAddress.toBigInt())?.get(key.toBigInt())).toEqual(value);
+      expect(journalUpdates.currentStorageValue.get(contractAddress.toBigInt())?.get(key.toBigInt())).toEqual(value);
     });
 
     it('When reading from storage, should check the parent first', async () => {
@@ -61,7 +61,7 @@ describe('journal', () => {
       expect(cachedResult).toEqual(cachedValue);
     });
 
-    it('When reading from storage, should check the cache first', async () => {
+    it('When reading from storage, should check the cache first, and be appended to read/write journal', async () => {
       // Store a different value in storage vs the cache, and make sure the cache is returned
       const contractAddress = new Fr(1);
       const key = new Fr(2);
@@ -80,6 +80,16 @@ describe('journal', () => {
       // Get the storage value
       const cachedResult = await journal.readStorage(contractAddress, key);
       expect(cachedResult).toEqual(cachedValue);
+
+      // We expect the journal to store the access in [storedVal, cachedVal] - [time0, time1]
+      const { storageReads, storageWrites }: JournalData = journal.flush();
+      const contractReads = storageReads.get(contractAddress.toBigInt());
+      const keyReads = contractReads?.get(key.toBigInt());
+      expect(keyReads).toEqual([storedValue, cachedValue]);
+
+      const contractWrites = storageWrites.get(contractAddress.toBigInt());
+      const keyWrites = contractWrites?.get(key.toBigInt());
+      expect(keyWrites).toEqual([cachedValue]);
     });
   });
 
@@ -127,17 +137,19 @@ describe('journal', () => {
     const logsT1 = [new Fr(3), new Fr(4)];
 
     journal.writeStorage(contractAddress, key, value);
+    await journal.readStorage(contractAddress, key);
     journal.writeNoteHash(commitment);
     journal.writeLog(logs);
     journal.writeL1Message(logs);
     journal.writeNullifier(commitment);
 
     const journal1 = new AvmJournal(journal.hostStorage, journal);
-    journal.writeStorage(contractAddress, key, valueT1);
-    journal.writeNoteHash(commitmentT1);
-    journal.writeLog(logsT1);
-    journal.writeL1Message(logsT1);
-    journal.writeNullifier(commitmentT1);
+    journal1.writeStorage(contractAddress, key, valueT1);
+    await journal1.readStorage(contractAddress, key);
+    journal1.writeNoteHash(commitmentT1);
+    journal1.writeLog(logsT1);
+    journal1.writeL1Message(logsT1);
+    journal1.writeNullifier(commitmentT1);
 
     journal1.mergeWithParent();
 
@@ -147,6 +159,18 @@ describe('journal', () => {
 
     // Check that the UTXOs are merged
     const journalUpdates: JournalData = journal.flush();
+
+    // Check that storage read and write order is preserved upon merge
+    // We first read value from t0, then value from t1
+    const contractReads = journalUpdates.storageReads.get(contractAddress.toBigInt());
+    const slotReads = contractReads?.get(key.toBigInt());
+    expect(slotReads).toEqual([value, valueT1]);
+
+    // We first write value from t0, then value from t1
+    const contractWrites = journalUpdates.storageWrites.get(contractAddress.toBigInt());
+    const slotWrites = contractWrites?.get(key.toBigInt());
+    expect(slotWrites).toEqual([value, valueT1]);
+
     expect(journalUpdates.newNoteHashes).toEqual([commitment, commitmentT1]);
     expect(journalUpdates.newLogs).toEqual([logs, logsT1]);
     expect(journalUpdates.newL1Messages).toEqual([logs, logsT1]);
diff --git a/yarn-project/acir-simulator/src/avm/journal/journal.ts b/yarn-project/acir-simulator/src/avm/journal/journal.ts
index 6168ce2cdf2..699d6b93f00 100644
--- a/yarn-project/acir-simulator/src/avm/journal/journal.ts
+++ b/yarn-project/acir-simulator/src/avm/journal/journal.ts
@@ -11,8 +11,14 @@ export type JournalData = {
   newNullifiers: Fr[];
   newL1Messages: Fr[][];
   newLogs: Fr[][];
+
   /** contract address -> key -> value */
-  storageWrites: Map<bigint, Map<bigint, Fr>>;
+  currentStorageValue: Map<bigint, Map<bigint, Fr>>;
+
+  /** contract address -> key -> value[] (stored in order of access) */
+  storageWrites: Map<bigint, Map<bigint, Fr[]>>;
+  /** contract address -> key -> value[] (stored in order of access) */
+  storageReads: Map<bigint, Map<bigint, Fr[]>>;
 };
 
 /**
@@ -28,9 +34,9 @@ export class AvmJournal {
   public readonly hostStorage: HostStorage;
 
   // Reading state - must be tracked for vm execution
-  // contract address -> key -> value
-  // TODO(https://github.com/AztecProtocol/aztec-packages/issues/3999)
-  private storageReads: Map<bigint, Map<bigint, Fr>> = new Map();
+  // contract address -> key -> value[] (array stored in order of reads)
+  private storageReads: Map<bigint, Map<bigint, Fr[]>> = new Map();
+  private storageWrites: Map<bigint, Map<bigint, Fr[]>> = new Map();
 
   // New written state
   private newNoteHashes: Fr[] = [];
@@ -41,7 +47,7 @@ export class AvmJournal {
   private newLogs: Fr[][] = [];
 
   // contract address -> key -> value
-  private storageWrites: Map<bigint, Map<bigint, Fr>> = new Map();
+  private currentStorageValue: Map<bigint, Map<bigint, Fr>> = new Map();
 
   private parentJournal: AvmJournal | undefined;
 
@@ -74,12 +80,15 @@ export class AvmJournal {
    * @param value -
    */
  public writeStorage(contractAddress: Fr, key: Fr, value: Fr) {
-    let contractMap = this.storageWrites.get(contractAddress.toBigInt());
+    let contractMap = this.currentStorageValue.get(contractAddress.toBigInt());
     if (!contractMap) {
       contractMap = new Map();
-      this.storageWrites.set(contractAddress.toBigInt(), contractMap);
+      this.currentStorageValue.set(contractAddress.toBigInt(), contractMap);
     }
     contractMap.set(key.toBigInt(), value);
+
+    // We want to keep track of all performed writes in the journal
+    this.journalWrite(contractAddress, key, value);
   }
 
   /**
@@ -90,17 +99,52 @@ export class AvmJournal {
    * @param key -
    * @returns current value
    */
-  public readStorage(contractAddress: Fr, key: Fr): Promise<Fr> {
-    const cachedValue = this.storageWrites.get(contractAddress.toBigInt())?.get(key.toBigInt());
-    if (cachedValue) {
-      return Promise.resolve(cachedValue);
+  public async readStorage(contractAddress: Fr, key: Fr): Promise<Fr> {
+    // - We first try this journal's storage cache ( if written to before in this call frame )
+    // - Then we try the parent journal's storage cache ( if it exists ) ( written to earlier in this block )
+    // - Finally we try the host storage ( a trip to the database )
+
+    // Do not early return as we want to keep track of reads in this.storageReads
+    let value = this.currentStorageValue.get(contractAddress.toBigInt())?.get(key.toBigInt());
+    if (!value && this.parentJournal) {
+      value = await this.parentJournal?.readStorage(contractAddress, key);
     }
-    if (this.parentJournal) {
-      return this.parentJournal?.readStorage(contractAddress, key);
+    if (!value) {
+      value = await this.hostStorage.publicStateDb.storageRead(contractAddress, key);
     }
-    return this.hostStorage.publicStateDb.storageRead(contractAddress, key);
+
+    this.journalRead(contractAddress, key, value);
+    return Promise.resolve(value);
   }
 
+  /**
+   * We want to keep track of all performed storage accesses (reads and writes) in the journal
+   * This information is hinted to the AVM circuit
+   * @param map - the reads or writes journal to append to
+   * @param contractAddress -
+   * @param key -
+   * @param value -
+   */
+  private journalUpdate(map: Map<bigint, Map<bigint, Fr[]>>, contractAddress: Fr, key: Fr, value: Fr): void {
+    let contractMap = map.get(contractAddress.toBigInt());
+    if (!contractMap) {
+      contractMap = new Map<bigint, Fr[]>();
+      map.set(contractAddress.toBigInt(), contractMap);
+    }
+
+    let accessArray = contractMap.get(key.toBigInt());
+    if (!accessArray) {
+      accessArray = new Array<Fr>();
+      contractMap.set(key.toBigInt(), accessArray);
+    }
+    accessArray.push(value);
+  }
+
+  // Bound version of journalUpdate that appends to the reads journal
+  private journalRead = this.journalUpdate.bind(this, this.storageReads);
+  // Bound version of journalUpdate that appends to the writes journal
+  private journalWrite = this.journalUpdate.bind(this, this.storageWrites);
+
   public writeNoteHash(noteHash: Fr) {
     this.newNoteHashes.push(noteHash);
   }
@@ -131,9 +175,14 @@ export class AvmJournal {
     this.parentJournal.newNoteHashes = this.parentJournal.newNoteHashes.concat(this.newNoteHashes);
     this.parentJournal.newL1Messages = this.parentJournal.newL1Messages.concat(this.newL1Messages);
     this.parentJournal.newNullifiers = this.parentJournal.newNullifiers.concat(this.newNullifiers);
+    this.parentJournal.newLogs = this.parentJournal.newLogs.concat(this.newLogs);
 
     // Merge Public State
-    mergeContractMaps(this.parentJournal.storageWrites, this.storageWrites);
+    mergeCurrentValueMaps(this.parentJournal.currentStorageValue, this.currentStorageValue);
+
+    // Merge storage read and write journals
+    mergeContractJournalMaps(this.parentJournal.storageReads, this.storageReads);
+    mergeContractJournalMaps(this.parentJournal.storageWrites, this.storageWrites);
   }
 
   /**
@@ -147,13 +196,15 @@ export class AvmJournal {
       newNullifiers: this.newNullifiers,
       newL1Messages: this.newL1Messages,
       newLogs: this.newLogs,
+      currentStorageValue: this.currentStorageValue,
+      storageReads: this.storageReads,
       storageWrites: this.storageWrites,
     };
   }
 }
 
 /**
- * Merges two contract maps together
+ * Merges two contract current value maps together
  * Where childMap keys will take precedent over the hostMap
  * The assumption being that the child map is created at a later time
  * And thus contains more up to date information
@@ -161,24 +212,56 @@
  * @param hostMap - The map to be merged into
  * @param childMap - The map to be merged from
  */
-function mergeContractMaps(hostMap: Map<bigint, Map<bigint, Fr>>, childMap: Map<bigint, Map<bigint, Fr>>) {
+function mergeCurrentValueMaps(hostMap: Map<bigint, Map<bigint, Fr>>, childMap: Map<bigint, Map<bigint, Fr>>) {
   for (const [key, value] of childMap) {
     const map1Value = hostMap.get(key);
     if (!map1Value) {
       hostMap.set(key, value);
     } else {
-      mergeStorageMaps(map1Value, value);
+      mergeStorageCurrentValueMaps(map1Value, value);
     }
   }
 }
 
 /**
- *
  * @param hostMap - The map to be merge into
 * @param childMap - The map to be merged from
 */
-function mergeStorageMaps(hostMap: Map<bigint, Fr>, childMap: Map<bigint, Fr>) {
+function mergeStorageCurrentValueMaps(hostMap: Map<bigint, Fr>, childMap: Map<bigint, Fr>) {
   for (const [key, value] of childMap) {
     hostMap.set(key, value);
   }
 }
+
+/**
+ * Merges two contract journalling maps together
+ * The childMap arrays are appended onto the hostMap arrays, as the access order must be preserved
+ *
+ * @param hostMap - The map to be merged into
+ * @param childMap - The map to be merged from
+ */
+function mergeContractJournalMaps(hostMap: Map<bigint, Map<bigint, Fr[]>>, childMap: Map<bigint, Map<bigint, Fr[]>>) {
+  for (const [key, value] of childMap) {
+    const map1Value = hostMap.get(key);
+    if (!map1Value) {
+      hostMap.set(key, value);
+    } else {
+      mergeStorageJournalMaps(map1Value, value);
+    }
+  }
+}
+
+/**
+ * @param hostMap - The map to be merged into
+ * @param childMap - The map to be merged from
+ */
+function mergeStorageJournalMaps(hostMap: Map<bigint, Fr[]>, childMap: Map<bigint, Fr[]>) {
+  for (const [key, value] of childMap) {
+    const accessArr = hostMap.get(key);
+    if (!accessArr) {
+      hostMap.set(key, value);
+    } else {
+      hostMap.set(key, accessArr.concat(value));
+    }
+  }
+}
diff --git a/yarn-project/acir-simulator/src/avm/opcodes/external_calls.test.ts b/yarn-project/acir-simulator/src/avm/opcodes/external_calls.test.ts
index 1d91c3bc405..50c29128ec5 100644
--- a/yarn-project/acir-simulator/src/avm/opcodes/external_calls.test.ts
+++ b/yarn-project/acir-simulator/src/avm/opcodes/external_calls.test.ts
@@ -78,10 +78,10 @@ describe('External Calls', () => {
     expect(retValue).toEqual([new Field(1n), new Field(2n)]);
 
     // Check that the storage call has been merged into the parent journal
-    const { storageWrites } = journal.flush();
-    expect(storageWrites.size).toEqual(1);
+    const { currentStorageValue } = journal.flush();
+    expect(currentStorageValue.size).toEqual(1);
 
-    const nestedContractWrites = storageWrites.get(addr.toBigInt());
+    const nestedContractWrites = currentStorageValue.get(addr.toBigInt());
     expect(nestedContractWrites).toBeDefined();
 
     const slotNumber = 1n;
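
The bookkeeping in this diff is easier to reason about outside the AvmJournal context, so here is a minimal standalone sketch. It is not the real implementation: `JournalSketch`, `append`, and `mergeInto` are hypothetical names, `bigint` stands in for `Fr`, and the parent-journal / host-storage fallback in `readStorage` (and all non-storage side effects) is elided. It models the same idea as the diff: keep a latest-value cache (`currentStorageValue`) next to per-slot access arrays (`storageReads` / `storageWrites`), and append the child's accesses after the parent's on merge so access order is preserved.

```ts
// Standalone sketch of the access-journal bookkeeping; bigint stands in for Fr.
type AccessJournal = Map<bigint, Map<bigint, bigint[]>>;

class JournalSketch {
  /** Latest value per (contract, slot) - plays the role of currentStorageValue. */
  currentStorageValue: Map<bigint, Map<bigint, bigint>> = new Map();
  /** Every access in the order it happened - plays the role of storageReads / storageWrites. */
  storageReads: AccessJournal = new Map();
  storageWrites: AccessJournal = new Map();

  writeStorage(contract: bigint, slot: bigint, value: bigint): void {
    let slots = this.currentStorageValue.get(contract);
    if (!slots) {
      slots = new Map();
      this.currentStorageValue.set(contract, slots);
    }
    slots.set(slot, value);
    this.append(this.storageWrites, contract, slot, value); // journal the write
  }

  readStorage(contract: bigint, slot: bigint): bigint {
    // The real journal falls back to the parent journal and then the host database;
    // the sketch just defaults to 0n so it stays self-contained.
    const value = this.currentStorageValue.get(contract)?.get(slot) ?? 0n;
    this.append(this.storageReads, contract, slot, value); // journal the read
    return value;
  }

  /** Fold this journal into `parent`, keeping parent accesses before child accesses. */
  mergeInto(parent: JournalSketch): void {
    for (const name of ['storageReads', 'storageWrites'] as const) {
      for (const [contract, slots] of this[name]) {
        for (const [slot, accesses] of slots) {
          // Push element by element so the parent's existing arrays are extended in place.
          for (const value of accesses) {
            parent.append(parent[name], contract, slot, value);
          }
        }
      }
    }
    // The child is newer, so its current values overwrite the parent's.
    for (const [contract, slots] of this.currentStorageValue) {
      let parentSlots = parent.currentStorageValue.get(contract);
      if (!parentSlots) {
        parentSlots = new Map();
        parent.currentStorageValue.set(contract, parentSlots);
      }
      for (const [slot, value] of slots) {
        parentSlots.set(slot, value);
      }
    }
  }

  private append(journal: AccessJournal, contract: bigint, slot: bigint, value: bigint): void {
    let slots = journal.get(contract);
    if (!slots) {
      slots = new Map();
      journal.set(contract, slots);
    }
    let accesses = slots.get(slot);
    if (!accesses) {
      accesses = [];
      slots.set(slot, accesses);
    }
    accesses.push(value);
  }
}

// The ordering the tests above assert after a merge: t0 accesses come before t1 accesses.
const parent = new JournalSketch();
const child = new JournalSketch();
parent.writeStorage(1n, 2n, 10n); // t0
child.writeStorage(1n, 2n, 20n); // t1
child.mergeInto(parent);
console.log(parent.storageWrites.get(1n)?.get(2n)); // [10n, 20n]
console.log(parent.currentStorageValue.get(1n)?.get(2n)); // 20n
```

Note the merge extends the existing arrays (push, or using the result of concat) rather than discarding `Array.prototype.concat`'s return value, which is the pitfall corrected in `mergeStorageJournalMaps` above.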