Commit aac26f02 authored by vicotor's avatar vicotor

remove unused code.

parent 3be2cd66
# Semgrep rules enforcing safety and style conventions for the Solidity
# contracts in this monorepo. Run with `semgrep scan --config .semgrep/rules/`.
rules:
  # _args passed to the DeployUtils create helpers must be produced by
  # DeployUtils.encodeConstructor so constructor args are encoded consistently.
  - id: sol-safety-deployutils-args
    languages: [solidity]
    severity: ERROR
    message: _args parameter should be wrapped with DeployUtils.encodeConstructor
    pattern-regex: DeployUtils\.(create1|create2|create1AndSave|create2AndSave)\s*\(\s*\{[^}]*?_args\s*:\s*(?!\s*DeployUtils\.encodeConstructor\()\s*[^}]*?\}\s*\)

  - id: sol-safety-expectrevert-before-ll-call
    languages: [solidity]
    severity: ERROR
    message: vm.expectRevert is followed by a low-level call but not followed by assertion expecting revert
    patterns:
      - pattern-either:
          - pattern: |
              vm.expectRevert(...);
              $CALL
              $CHECK
          - pattern: |
              vm.expectRevert(...);
              $CALL
      - metavariable-pattern:
          metavariable: $CALL
          patterns:
            - pattern-regex: \.call\(.*\)|\.delegatecall\(.*\)
      - pattern-not-inside:
          patterns:
            - pattern: |
                vm.expectRevert(...);
                $CALL;
                assertTrue(revertsAsExpected);

  - id: sol-safety-expectrevert-no-args
    languages: [solidity]
    severity: ERROR
    message: vm.expectRevert() must specify the revert reason
    patterns:
      - pattern: vm.expectRevert()
    paths:
      exclude:
        - packages/contracts-bedrock/test/dispute/WETH98.t.sol

  # NOTE: message previously read "Semgrep defined in contract"; the rule
  # compares the semver in the natspec tag against the contract's version.
  - id: sol-safety-natspec-semver-match
    languages: [generic]
    severity: ERROR
    message: Semver defined in contract must match natspec $VERSION1 $VERSION2
    patterns:
      - pattern-either:
          - pattern-regex: /// @custom:semver
              (?P<VERSION1>[0-9]+\.[0-9]+\.[0-9]+(?:-[a-zA-Z0-9.]+)?)\s+string
              public constant version =
              "(?P<VERSION2>[0-9]+\.[0-9]+\.[0-9]+(?:-[a-zA-Z0-9.]+)?)";
          - pattern-regex: /// @custom:semver
              (?P<VERSION1>[0-9]+\.[0-9]+\.[0-9]+(?:-[a-zA-Z0-9.]+)?)\s+function
              version\(\) public pure virtual returns \(string memory\)
              \{\s+return
              "(?P<VERSION2>[0-9]+\.[0-9]+\.[0-9]+(?:-[a-zA-Z0-9.]+)?)";
          - pattern-regex:
              /// @custom:semver (?P<VERSION1>[a-zA-Z0-9.+-]+)\s+function
              version\(\) public pure override returns \(string memory\)
              \{\s+return string\.concat\(super\.version\(\),
              "(?P<VERSION2>[a-zA-Z0-9.+-]+)"\);
      - metavariable-comparison:
          comparison: $VERSION1 != $VERSION2
          metavariable: $VERSION1
    paths:
      include:
        - packages/contracts-bedrock/src

  - id: sol-safety-no-public-in-libraries
    languages: [generic]
    severity: ERROR
    message: Public functions in libraries are not allowed
    patterns:
      - pattern-inside: |
          library $LIBRARY {
            ...
          }
      - pattern-regex: function\s+\w+\s*\([^)]*\)\s+(?:.*\s+)?(public|external)\s+.*\{

  - id: sol-style-input-arg-fmt
    languages: [solidity]
    severity: ERROR
    message: Named inputs to functions must be prepended with an underscore
    pattern-regex: function\s+\w+\s*\(\s*([^)]*?\b\w+\s+(?!_)(?!memory\b)(?!calldata\b)(?!storage\b)(?!payable\b)\w+\s*(?=,|\)))
    paths:
      exclude:
        - op-chain-ops/script/testdata/scripts/ScriptExample.s.sol
        - packages/contracts-bedrock/test
        - packages/contracts-bedrock/interfaces
        - packages/contracts-bedrock/scripts/libraries/Solarray.sol
        - packages/contracts-bedrock/src/universal/WETH98.sol
        - packages/contracts-bedrock/src/L2/SuperchainWETH.sol
        - packages/contracts-bedrock/src/governance/GovernanceToken.sol

  - id: sol-style-return-arg-fmt
    languages: [solidity]
    severity: ERROR
    message: Named return arguments to functions must be appended with an underscore
    pattern-regex: returns\s*(\w+\s*)?\(\s*([^)]*?\b\w+\s+(?!memory\b)(?!calldata\b)(?!storage\b)(?!payable\b)\w+(?<!_)\s*(?=,|\)))
    paths:
      exclude:
        - packages/contracts-bedrock/interfaces/dispute/IDelayedWETH.sol
        - op-chain-ops/script/testdata/scripts/ScriptExample.s.sol
        - packages/contracts-bedrock/interfaces
        - packages/contracts-bedrock/test/safe-tools
        - packages/contracts-bedrock/scripts/libraries/Solarray.sol
        - packages/contracts-bedrock/scripts/interfaces/IGnosisSafe.sol

  - id: sol-style-doc-comment
    languages: [solidity]
    severity: ERROR
    message: Javadoc-style comments are not allowed, use `///` style doc comments instead
    pattern-regex: (\/\*\*\n(\s+\*\s.*\n)+\s+\*\/)
    paths:
      exclude:
        - packages/contracts-bedrock/test/safe-tools/CompatibilityFallbackHandler_1_3_0.sol

  - id: sol-style-malformed-require
    languages: [solidity]
    severity: ERROR
    message: Require statement style is malformed
    patterns:
      - pattern: require(..., $ERR);
      - pattern-not: require($ERR);
      - focus-metavariable: $ERR
      - pattern-not-regex: \"(\w+:\s[^"]+)\"
      - pattern-not-regex: string\.concat\(\"(\w+:\s[^"]+)\"\,[^"]+\)
      - pattern-not-regex: \"([a-zA-Z0-9\s]+-[a-zA-Z0-9\s]+)\"
      - pattern-not-regex: \"([a-zA-Z0-9\s]+-[a-zA-Z0-9\s]+-[a-zA-Z0-9\s]+)\"
    paths:
      exclude:
        - packages/contracts-bedrock/src/libraries/Bytes.sol
        - packages/contracts-bedrock/src/legacy/LegacyMintableERC20.sol
        - packages/contracts-bedrock/src/cannon/MIPS.sol
        - packages/contracts-bedrock/src/cannon/MIPS2.sol
        - packages/contracts-bedrock/src/cannon/libraries/MIPSMemory.sol
        - packages/contracts-bedrock/src/cannon/libraries/MIPSInstructions.sol

  - id: sol-style-malformed-revert
    languages: [solidity]
    severity: ERROR
    message: Revert statement style is malformed
    patterns:
      - pattern: revert($MSG);
      - pattern-not: revert $ERR(...);
      - focus-metavariable: $MSG
      - pattern-not-regex: \"(\w+:\s[^"]+)\"
      - pattern-not-regex: string\.concat\(\"(\w+:\s[^"]+)\"\,[^"]+\)
      - pattern-not-regex: \"([a-zA-Z0-9\s]+-[a-zA-Z0-9\s]+)\"
      - pattern-not-regex: \"([a-zA-Z0-9\s]+-[a-zA-Z0-9\s]+-[a-zA-Z0-9\s]+)\"
    paths:
      exclude:
        - packages/contracts-bedrock/src/cannon/libraries/MIPSInstructions.sol

  - id: sol-style-use-abi-encodecall
    languages: [solidity]
    severity: ERROR
    message: Use abi.encodeCall instead of abi.encodeWithSelector
    patterns:
      - pattern-either:
          - pattern: |
              abi.encodeWithSelector(...);
          - pattern: |
              abi.encodeWithSignature(...);
      - pattern-not: vm.expectRevert(abi.encodeWithSelector(...));
    paths:
      exclude:
        - packages/contracts-bedrock/src/L1/OPContractsManager.sol
        - packages/contracts-bedrock/src/L1/OPContractsManagerInterop.sol
        - packages/contracts-bedrock/src/legacy/L1ChugSplashProxy.sol

  - id: sol-style-enforce-require-msg
    languages: [solidity]
    severity: ERROR
    message: Require statement must have an error message
    patterns:
      - pattern: require($ERR);
      - pattern-not: require($ERR, $MSG);
    paths:
      exclude:
        - packages/contracts-bedrock/src/universal/WETH98.sol

  - id: sol-style-no-bare-imports
    languages: [solidity]
    severity: ERROR
    message: Import specific components instead of the entire file
    pattern-regex: import\s+"[^"]+"\s*;
    paths:
      exclude:
        - packages/contracts-bedrock/test

  - id: sol-safety-use-disable-initializer
    languages: [solidity]
    severity: ERROR
    message: Proxied contracts (excluding predeploys) must disable initializers in constructor
    patterns:
      - pattern-regex: "///\\s*@custom:proxied\\s+true(?P<CONTRACT>[\\s\\S]*)"
      - pattern-not-regex: "///\\s*@custom:predeploy.*(?P<REST>[\\s\\S]*)"
      - focus-metavariable: $CONTRACT
      - pattern: |
          constructor(...) {
            ...
          }
      - pattern-not: |
          constructor(...) {
            ...
            _disableInitializers();
            ...
          }
    paths:
      exclude:
        - packages/contracts-bedrock/src/L1/SystemConfigInterop.sol
        - packages/contracts-bedrock/src/L1/OptimismPortalInterop.sol

  - id: sol-safety-proper-initializer
    languages: [solidity]
    severity: ERROR
    message: Proxied contracts must have an initialize function with the initializer modifier and external visibility
    patterns:
      - pattern-regex: "///\\s*@custom:proxied\\s+true(?P<CONTRACT>[\\s\\S]*)"
      - focus-metavariable: $CONTRACT
      - pattern: |
          function initialize(...) {
            ...
          }
      - pattern-not: |
          function initialize(...) external initializer {
            ...
          }
    paths:
      exclude:
        - packages/contracts-bedrock/src/L1/SystemConfig.sol
        - packages/contracts-bedrock/src/L1/SystemConfigInterop.sol
// Semgrep tests for Solidity rules are defined in this file.
// Semgrep tests do not need to be valid Solidity code but should be syntactically correct so that
// Semgrep can parse them. You don't need to be able to *run* the code here but it should look like
// the code that you expect to catch with the rule.
//
// Semgrep testing 101
// Use comments like "ruleid: <rule-id>" to assert that the rule catches the code.
// Use comments like "ok: <rule-id>" to assert that the rule does not catch the code.
/// NOTE: Semgrep limitations mean that the rule for this check is defined as a relatively loose regex that searches the
/// remainder of the file after the `@custom:proxied` natspec tag is detected. This means that we must test the case
/// without this natspec tag BEFORE the case with the tag or the rule will apply to the remainder of the file.
// If no proxied natspec, initialize functions can have no initializer modifier and be public or external
// Expected: no findings — the `@custom:proxied true` tag is absent here.
contract SemgrepTest__sol_safety_proper_initializer {
    // ok: sol-safety-proper-initializer
    function initialize() external {
        // ...
    }

    // ok: sol-safety-proper-initializer
    function initialize() public {
        // ...
    }
}
/// NOTE: the proxied natspec below is valid for all contracts after this one
/// @custom:proxied true
contract SemgrepTest__sol_safety_proper_initializer {
    // The `@custom:proxied true` tag above this contract activates the rule:
    // initialize() must be `external` AND carry the `initializer` modifier.

    // ok: sol-safety-proper-initializer
    function initialize() external initializer {
        // ...
    }

    // ruleid: sol-safety-proper-initializer
    function initialize() external {
        // ...
    }

    // ruleid: sol-safety-proper-initializer
    function initialize() public initializer {
        // ...
    }

    // ruleid: sol-safety-proper-initializer
    function initialize() public {
        // ...
    }
}
// Semgrep tests for Solidity rules are defined in this file.
// Semgrep tests do not need to be valid Solidity code but should be syntactically correct so that
// Semgrep can parse them. You don't need to be able to *run* the code here but it should look like
// the code that you expect to catch with the rule.
//
// Semgrep testing 101
// Use comments like "ruleid: <rule-id>" to assert that the rule catches the code.
// Use comments like "ok: <rule-id>" to assert that the rule does not catch the code.
/// NOTE: Semgrep limitations mean that the rule for this check is defined as a relatively loose regex that searches the
/// remainder of the file after the `@custom:proxied` natspec tag is detected.
/// This means that we must test the case without this natspec tag BEFORE the case with the tag or the rule will apply
/// to the remainder of the file.
// If no predeploy natspec, disableInitializers can or cannot be called in constructor
contract SemgrepTest__sol_safety_use_disable_initializer {
    // No `@custom:proxied true` tag precedes this contract, so constructors
    // may freely call or omit _disableInitializers().

    // ok: sol-safety-use-disable-initializer
    constructor() {
        // ...
        _disableInitializers();
        // ...
    }

    // ok: sol-safety-use-disable-initializer
    constructor() {
        // ...
    }
}
// if no predeploy natspec, disableInitializers must be called in constructor
/// @custom:proxied true
contract SemgrepTest__sol_safety_use_disable_initializer {
    // Proxied (tag above) and not a predeploy: every constructor must call
    // _disableInitializers().

    // ok: sol-safety-use-disable-initializer
    constructor() {
        // ...
        _disableInitializers();
        // ...
    }

    // ruleid: sol-safety-use-disable-initializer
    constructor() {
        // ...
    }
}
/// NOTE: the predeploy natspec below is valid for all contracts after this one
/// @custom:predeploy
// if predeploy natspec, disableInitializers may or may not be called in constructor
contract SemgrepTest__sol_safety_use_disable_initializer {
    // The `@custom:predeploy` tag above exempts predeploys: constructors may
    // call or omit _disableInitializers().

    // ok: sol-safety-use-disable-initializer
    constructor() {
        // ...
    }

    // ok: sol-safety-use-disable-initializer
    constructor() {
        // ...
        _disableInitializers();
        // ...
    }
}
This diff is collapsed.
# Common large paths
node_modules/
build/
dist/
vendor/
.env/
.venv/
.tox/
*.min.js
# Semgrep rules folder
.semgrep/
# Semgrep-action log folder
.semgrep_logs/
# Test contracts the scripts folder
op-chain-ops/script/testdata/scripts/
\ No newline at end of file
## Optimism Monorepo Documentation
The `docs/` directory contains Optimism documentation closely tied to the implementation details of the monorepo (https://github.com/ethereum-optimism/optimism).
The directory layout is divided into the following sub-directories.
- [`postmortems/`](./postmortems/): Timestamped post-mortem documents.
- [`security-reviews`](./security-reviews/): Audit summaries and other security review documents.
# Pull Request Guidelines and Best Practices
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
- [Overview](#overview)
- [PR Lifecycle Best Practices](#pr-lifecycle-best-practices)
- [Before Starting PRs](#before-starting-prs)
- [Opening PRs](#opening-prs)
- [Reviewing PRs](#reviewing-prs)
- [Merging PRs](#merging-prs)
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
## Overview
This document contains guidelines and best practices in PRs that should be enforced as much as possible. The motivations and goals behind these best practices are:
- **Ensure thorough reviews**: By the time the PR is merged, at least one other person—because there is always at least one reviewer—should understand the PR’s changes just as well as the PR author. This helps improve security by reducing bugs and single points of failure (i.e. there should never be only one person who understands certain code).
- **Reduce PR churn**: PRs should be quickly reviewable and mergeable without much churn (both in terms of code rewrites and comment cycles). This saves time by reducing the need for rebases due to conflicts. Similarly, too many review cycles are a burden for both PR authors and reviewers, and results in “review fatigue” where reviews become less careful and thorough, increasing the likelihood of bugs.
- **Traceability**: We should be able to look back at issues and PRs to understand why a certain decision was made or why a given approach was taken.
## PR Lifecycle Best Practices
This is organized by current state of PR, so it can be easily referenced frequently to help internalize the guidelines.
### Before Starting PRs
- **Keep PRs Focused**: Each PR should be a single, narrow, well-defined scope.
### Opening PRs
- **Review Your Own Code**: Reviewing the diff yourself *in a different context*, can be very useful for discovering issues, typos, and bugs before opening the PR. For example, write code in your IDE, then review it in the GitHub diff view. The perspective change forces you to slow down and helps reveal issues you may have missed.
- **Explain Decisions/Tradeoffs**: Explain rationale for any design/architecture decisions and implementation details in the PR description. If it closes an issue, remember to mention the issue it closes, e.g. `Closes <issueUrl>`. Otherwise, just link to the issue. If there is no issue, whatever details would have been in the issue should be in the PR description.
- **Guide PR reviewers:** Let them know about areas of concern, under-tested areas, or vague requirements that should be ironed out.
### Reviewing PRs
- **Verify Requirements are Met**: If the PR claims to fix or close an issue, check that all the requirements in the issue are actually met. Otherwise the issue may be in a good place to merge, but just shouldn’t close the issue.
- **Focus on Tests**: The tests are the spec and therefore should be the focus of reviews. If tests are thorough and passing, the rest is an implementation detail (to an extent—don’t skip source code reviews) that can be fixed in a future optimization/cleanup PR. Make sure edge case behaviors are defined and handled.
- **Think like an Auditor:** What edge cases were ignored? How can the code break? When might it behave incorrectly and unexpectedly? What code should have been changed that isn’t in the diff? What implicit assumptions are made that might be invalid?
- **Ensure Comment Significance is Clear**: Indicate which comments are nits/optionals that the PR author can resolve, compared to which you want to follow up on.
- Prefix non-blocking comments with `[nit]` or `[non-blocking]`.
- **Consider Reviewing in Your IDE**: For example, GitHub has [this VSCode extension](https://marketplace.visualstudio.com/items?itemName=GitHub.vscode-pull-request-github) to review PRs. This provides more code context and enables review to benefit from your standard lints and IDE features, whereas GitHub’s diff shows none of that.
### Merging PRs
- **Resolve all Comments**: Comments can be resolved by (1) the PR author for nits/optionals, (2) the author or reviewer after discussions, or (3) extracting the comment into an issue to address in a future PR. For (3), ensure the new issue links to the specific comment thread. This is currently enforced by GitHub's merge requirements.
- **Other Standard Merge Requirements**: The PR must be approved by the appropriate reviewers, CI must pass, and other standard merge requirements apply.
This diff is collapsed.
This diff is collapsed.
# [Public] 4/26 Transaction Delays Post-Mortem
# Incident Summary
On April 26, 2023 between the hours of 1900 and 2100 UTC, OP Mainnet experienced degraded service following a ~10x increase in the rate of `eth_sendRawTransaction` requests.
While the sequencer remained online and continued to process transactions, users experienced transaction inclusion delays, rate limit errors, problems syncing nodes, and other symptoms of degraded performance.
The issue resolved itself once the rate of `eth_sendRawTransaction` requests subsided. However, we did not communicate the status of the network to our users, nor did we execute on mitigations quickly enough that could have reduced the impact of the degraded service. We recognize that this was a frustrating experience that caused significant impact to our users, particularly those participating in the DeFi ecosystem. We are sorry for this user experience, and hope that this retrospective provides insight into what happened and what we are doing to prevent similar issues from happening again.
# Leadup
OP Mainnet has not yet been upgraded to Bedrock. As a result, all OP Mainnet nodes run two components to sync the L2 chain:
- The `data-transport-layer` (or DTL), which indexes transactions from L1 or another L2 node.
- `l2geth`, which executes the transactions indexed by the DTL and maintains the L2 chain’s state.
The DTL and `l2geth` retrieve new data by polling for it. The DTL polls L1 or L2 depending on how it is configured, and `l2geth` polls the DTL. The higher the transaction throughput is, the more transactions will have to be processed between each “tick” of the polling loop. When throughput is too high, it is possible for the number of transactions between each tick to exceed what can be processed in a single tick. In this case, multiple ticks are necessary to catch up to the tip of the chain.
To protect the sequencer against traffic spikes, we route read requests - specifically `eth_getBlockRange`, which the DTL uses to sync from L2 - to a read-only replica rather than to the sequencer itself.
At 1915 UTC, sequencer throughput jumped from the usual ~8 transactions per second to a peak of 95 transactions per second over the course of 15 minutes.
# Causes
As a result of the increased throughput, our read-only replica started to fall behind. The graph below shows the delay, in seconds, between the sequencer creating new blocks and them being indexed by the read-only replica:
![outage.png](2023-04-26-transaction-delays/outage.png)
This meant that while the sequencer was processing transactions normally, users were unable to see their transactions confirmed on chain for several minutes. For DeFi apps relying on an accurate view of on-chain data, this caused transactions to be reverted and positions to be liquidated. It also made it difficult to retry transactions, since the user’s wallet nonce may have increased on the sequencer but not on the replica.
This issue was ecosystem wide. Infrastructure providers run replicas of their own, which use the same polling-based mechanism to sync data. This likely further increased the delay between when transactions were processed, and when they appeared as confirmed. This is not an error on the part of infrastructure providers, but rather a flaw in how the pre-Bedrock system is designed.
# Recovery and Lessons Learned
The issue resolved itself once the transaction volume dropped back down to normal levels. However, we did not communicate with our community for the duration of the outage. This is a significant miss, and for that we apologize. Going forward, we will do the following in an effort to avoid similar issues:
- We will add monitoring for replica lag, so that we can proactively route traffic directly to the sequencer when throughput increases beyond what the sync mechanism can handle.
- Though rate limits were not the direct cause of this incident, we will increase rate limits so that node operators can poll for new blocks more frequently.
Lastly, we will upgrade mainnet to Bedrock later this year. Bedrock fixes these issues from an architectural perspective. Specifically:
- There will be no more DTL, or polling-based sync mechanism. Blocks are either derived from L1, or gossipped over a peer-to-peer network.
- Blocks will be created every two seconds rather than on every transaction. This allows data to propagate across the network more efficiently and predictably.
- The sequencer will have a private mempool. This will allow fee-replacement transactions to work properly, and eliminate the need for aggressive rate limiting on the sequencer.
We recognize how frustrating an issue like this can be, and that is compounded when we are not proactively communicating. We’re sorry our users had this experience. We’re committed to applying these learnings moving forward and appreciate our community holding us accountable.
# Audit Report - OP Cannon
| | |
| -------------- | ------------------------------------------------------------------------- |
| **Audit Date** | Oct 2nd 2024 - Oct 3rd 2024 |
| **Auditor** | 3DOC Security ([@3docSec](https://x.com/3docSec)) |
| **Version 1** | Oct 3rd 2024. |
<br clear="both" />
# Contents
- [Audit Report - OP cannon](#audit-report---op-cannon)
- [Contents](#contents)
- [Disclaimer](#disclaimer)
- [About 3DOC](#about-3doc)
- [Scope](#scope)
- [Severity Classification](#severity-classification)
- [Summary](#summary)
- [Findings](#findings)
- [Low Risk Findings (1)](#low-risk-findings-1)
- [1. Op-challenger Docker image does not include Cannon embeds](#-op-challenger-docker-image-does-not-include-cannon-embeds)
# Disclaimer
_The following audit report is based on the information and code provided by the client, and any findings or recommendations are made solely on the basis of this information. While the Auditor has exercised due care and skill in conducting the audit, it cannot be guaranteed that all issues have been identified and that there are no undiscovered errors or vulnerabilities in the code._
_Furthermore, this report is not an endorsement or certification of the protocol, and the Auditor does not assume any responsibility for any losses or damages that may result from the use of the smart contracts, either in their current form or in any modified version thereof._
# About 3DOC
3DOC is a top ranked Smart Contract Auditor doing audits on Code4rena (www.code4rena.com), having ranked 1st in multiple contests in [solo](https://code4rena.com/@3docSec) and [team](https://code4rena.com/@RadiantLabs) audits, including the [Optimism superchain contest](https://code4rena.com/audits/2024-07-optimism-superchain) in July 2024.<br>
He can also be booked for conducting Private Audits.
Contact: <br>
X: [@3DocSec](https://x.com/3DocSec)
e-mail: [hello@3doc.fr](mailto:hello@3doc.fr)
# Scope
The scope of the audit is the following Pull Request in the client's GitHub repository:
https://github.com/ethereum-optimism/optimism/pull/12050
The change consists of a core update for supporting the `F_GETFD` syscall in the MIPS VM, [provided with this commit](https://github.com/ethereum-optimism/optimism/pull/12050/commits/7c8257d3574a2a76ab90f8129c7b532d68049944), and several additional updates accommodating the VM version bump that came with the core change.
# Severity Classification
| Severity | Impact: High | Impact: Medium | Impact: Low |
| ---------------------- | ------------ | -------------- | ----------- |
| **Likelihood: High** | ![high] | ![high] | ![medium] |
| **Likelihood: Medium** | ![high] | ![medium] | ![low] |
| **Likelihood: Low** | ![medium] | ![low] | ![low] |
**Impact** - the technical, economic and reputation damage of a successful attack
**Likelihood** - the chance that a particular vulnerability is discovered and exploited
# Summary
| Severity | Total |
| -------------- | ----- |
| ![high] | 0 |
| ![medium] | 0 |
| ![low] | 0 |
| ![information] | 0 |
# Findings
## Low Risk findings (0)
### [False positive] Op-challenger Docker image does not include Cannon embeds
#### Description
The change in scope added a new implementation of the Cannon VM, which was called `VersionSingleThreaded2`. Cannon has now three versions (`VersionSingleThreaded`, `VersionSingleThreaded2`, and `VersionMultiThreaded`).
The op-challenger program makes use of the Cannon VM in several places via the configured `VmBin` path, which points to the `multicannon` command line. This one reads the State version from the input state and selects the right Cannon VM accordingly (`cannon/multicannon/exec.go:L81`).
If we look at the Docker challenger image generated by the `make golang-docker` command, however, we can see it doesn't contain an `embeds` folder:
```
docker run -t us-docker.pkg.dev/oplabs-tools-artifacts/images/op-challenger find / -name embeds
```
But it however has the `cannon` command pointing to the `multicannon` multiplexer:
```
➜ optimism git:(52d0e60c1) ✗ docker run -t us-docker.pkg.dev/oplabs-tools-artifacts/images/op-challenger cannon | head -2
NAME:
multicannon - MIPS Fault Proof tool
➜ optimism git:(52d0e60c1) ✗
```
This issue appears to pre-date the changes in scope; using Docker images to run the challenger is [mentioned as an option](https://docs.optimism.io/builders/chain-operators/tools/op-challenger), but only as an alternative option, hence the Low risk assessed for this finding.
#### Impact
Because of this issue, challenger instances operated in a Docker container won't be able to function properly.
#### Recommendation
Consider modifying the Docker build chain to include the `embeds` folder.
Consider extending the current e2e test suite to cover execution from Docker images.
#### Discussion
> @inphi The cannon-2 implementation that supports go1.22 is now embedded into the cannon cli binary. Note that these embeds are not actual files that you can find in the docker container filesystem. But rather an embedded filesystem inside the Go binary - https://pkg.go.dev/embed.
> @3DOC Oh yes I see that. So those are included in an embedded filesystem, I missed that
[high]: https://img.shields.io/badge/-HIGH-b02319 "HIGH"
[medium]: https://img.shields.io/badge/-MEDIUM-orange "MEDIUM"
[low]: https://img.shields.io/badge/-LOW-FFD700 "LOW"
[information]: https://img.shields.io/badge/-INFORMATION-darkgreen "INFORMATION"
[fixed]: https://img.shields.io/badge/-FIXED-brightgreen "FIXED"
[acknowledged]: https://img.shields.io/badge/-ACKNOWLEDGED-blue "ACKNOWLEDGED"
[disputed]: https://img.shields.io/badge/-DISPUTED-lightgrey "DISPUTED"
[reported]: https://img.shields.io/badge/-REPORTED-lightblue "REPORTED"
[partiallyfixed]: https://img.shields.io/badge/-PARTIALLY_FIXED-lightgreen "PARTIALLY FIXED"
This diff is collapsed.
# Checks that TODO comments have corresponding issues.
todo-checker:
  ./ops/scripts/todo-checker.sh

# Runs semgrep on the entire monorepo.
semgrep:
  semgrep scan --config .semgrep/rules/ --error .

# Runs semgrep tests.
semgrep-test:
  semgrep scan --test --config .semgrep/rules/ .semgrep/tests/

# Runs shellcheck.
shellcheck:
  find . -type f -name '*.sh' -not -path '*/node_modules/*' -not -path './packages/contracts-bedrock/lib/*' -not -path './packages/contracts-bedrock/kout*/*' -exec sh -c 'echo "Checking $1"; shellcheck "$1"' _ {} \;
# Getting Started
Running a Kurtosis Devnet has the following prerequisites:
- Kurtosis must be installed
- Docker Desktop must be installed and running
Platform specific installation instructions for Kurtosis may be found [in Kurtosis documentation](https://docs.kurtosis.com/install/),
but for Mac users, the following command should suffice:
```
brew install kurtosis-tech/tap/kurtosis-cli
```
Check your Kurtosis version with `kurtosis version`. The current ideal version for these devnets is `1.4.3`.
Docker Desktop may be substituted by an alternative like Orbstack if you have that installed.
# Running A Devnet
To see available devnets, consult the `justfile` to see what `.*-devnet` targets exist, currently
- `mini-devnet`
- `simple-devnet`
- `interop-devnet`
- `user-devnet`
You can read over the referenced `yaml` files located in this directory to see the network definition which would be deployed. Mini and Simple are example network definitions, and User expects a provided network definition.
To run the Interop Devnet, simply:
```
just interop-devnet
```
If all works as expected, you should see a collection of containers appear in Docker. Some of them are Kurtosis infrastructure, while others are the actual hosts for your network. You can observe that the network is running by searching for "supervisor" and watching its logs.
## Resolving Issues
Here is a list of potential pitfalls when running Kurtosis and known solutions.
### `error ensuring kurtosis engine is running`
This error indicates Docker Desktop (or your alternative) is not running.
### `network with name kt-interop-devnet already exists`
If your kurtosis network is taken down and destroyed through docker, it is possible that the network resources are left around, preventing you from starting up a new network. To resolve, run:
```
kurtosis engine stop
docker network rm kt-interop-devnet
```
You can use `docker network ls` to inspect for networks to remove if the error message specifies some other network.
# Kurtosis-devnet support
## devnet specification
Due to sandboxing issues across repositories, we currently rely on a slight
superset of the native optimism-package specification YAML file, via go
templates.
So that means in particular that the regular optimism-package input is valid
here.
Additional custom functions:
- localDockerImage(PROJECT): builds a docker image for PROJECT based on the
current branch content.
- localContractArtifacts(LAYER): builds a contracts bundle based on the current
branch content (note: LAYER is currently ignored, we might need to revisit)
Example:
```yaml
...
op_contract_deployer_params:
image: {{ localDockerImage "op-deployer" }}
l1_artifacts_locator: {{ localContractArtifacts "l1" }}
l2_artifacts_locator: {{ localContractArtifacts "l2" }}
...
```
The list of supported PROJECT values can be found in `justfile` as a
PROJECT-image target. Adding a target there will make it immediately available to the
template engine.
## devnet deployment tool
Located in cmd/main.go, this tool handles the creation of an enclave matching the
provided specification.
The expected entry point for interacting with it is the corresponding
`just devnet SPEC` target.
This takes an optional 2nd argument, that can be used to provide values for the
template interpretation.
Note that a SPEC of the form `FOO.yaml` will yield a kurtosis enclave named
`FOO-devnet`
Convenience targets can be added to `justfile` for specific specifications, for
example:
```just
interop-devnet: (devnet "interop.yaml")
```
## devnet output
One important aspect of the devnet workflow is that the output should be
*consumable*. Going forward we want to integrate them into larger workflows
(serving as targets for tests for example, or any other form of automation).
To address this, the deployment tool outputs a document with (hopefully!) useful
information. Here's a short extract:
```json
{
"l1": {
"name": "Ethereum",
"nodes": [
{
"cl": "http://localhost:53689",
"el": "http://localhost:53620"
}
]
},
"l2": [
{
"name": "op-kurtosis-1",
"id": "2151908",
"services": {
"batcher": "http://localhost:57259"
},
"nodes": [
{
"cl": "http://localhost:57029",
"el": "http://localhost:56781"
}
],
"addresses": {
"addressManager": "0x1b89c03f2d8041b2ba16b5128e613d9279195d1a",
...
}
},
...
],
"wallets": {
"baseFeeVaultRecipient": {
"address": "0xF435e3ba80545679CfC24E5766d7B02F0CCB5938",
"private_key": "0xc661dd5d4b091676d1a5f2b5110f9a13cb8682140587bd756e357286a98d2c26"
},
...
}
}
```
## further interactions
Beyond deployment, we can interact with enclaves normally.
In particular, cleaning up a devnet can be achieved using
`kurtosis rm FOO-devnet` and the likes.
package main
import (
"context"
"encoding/json"
"fmt"
"log"
"os"
"path/filepath"
"github.com/exchain/go-exchain/kurtosis-devnet/pkg/deploy"
"github.com/exchain/go-exchain/kurtosis-devnet/pkg/kurtosis"
"github.com/urfave/cli/v2"
)
// config holds the resolved CLI options for a single devnet deployment run.
type config struct {
	templateFile    string // path to the deployment template (required)
	dataFile        string // optional JSON data file for template interpolation
	kurtosisPackage string // kurtosis package to deploy
	enclave         string // target enclave name
	environment     string // output path for the environment JSON (stdout when empty)
	dryRun          bool   // when true, downstream steps skip real side effects
	baseDir         string // derived from the template file's directory in newConfig
	kurtosisBinary  string // path to the kurtosis binary
}
// newConfig builds a config from the parsed CLI flags and validates it.
// The "template" flag is mandatory; baseDir is derived from the template
// file's directory. Returns an error when the template flag is missing.
func newConfig(c *cli.Context) (*config, error) {
	templatePath := c.String("template")
	// Validate required flags up front.
	if templatePath == "" {
		return nil, fmt.Errorf("template file is required")
	}
	return &config{
		templateFile:    templatePath,
		dataFile:        c.String("data"),
		kurtosisPackage: c.String("kurtosis-package"),
		enclave:         c.String("enclave"),
		environment:     c.String("environment"),
		dryRun:          c.Bool("dry-run"),
		kurtosisBinary:  c.String("kurtosis-binary"),
		baseDir:         filepath.Dir(templatePath),
	}, nil
}
// writeEnvironment serializes env as indented JSON to the file at path,
// or to stdout when path is empty. The output file is created (truncated
// if it exists) and closed before returning.
func writeEnvironment(path string, env *kurtosis.KurtosisEnvironment) error {
	dest := os.Stdout
	if path != "" {
		f, err := os.Create(path)
		if err != nil {
			return fmt.Errorf("error creating environment file: %w", err)
		}
		defer f.Close()
		dest = f
	}
	encoder := json.NewEncoder(dest)
	encoder.SetIndent("", " ")
	if err := encoder.Encode(env); err != nil {
		return fmt.Errorf("error encoding environment: %w", err)
	}
	return nil
}
// mainAction is the CLI entry action: it parses the flags into a config,
// builds a Deployer from it, runs the deployment, and writes the resulting
// environment description to the configured output (file or stdout).
func mainAction(c *cli.Context) error {
	ctx := context.Background()
	cfg, err := newConfig(c)
	if err != nil {
		return fmt.Errorf("error parsing config: %w", err)
	}
	// Every config field maps one-to-one onto a deployer option.
	deployer := deploy.NewDeployer(
		deploy.WithKurtosisPackage(cfg.kurtosisPackage),
		deploy.WithEnclave(cfg.enclave),
		deploy.WithDryRun(cfg.dryRun),
		deploy.WithKurtosisBinary(cfg.kurtosisBinary),
		deploy.WithTemplateFile(cfg.templateFile),
		deploy.WithDataFile(cfg.dataFile),
		deploy.WithBaseDir(cfg.baseDir),
	)
	// nil reader: the deployer renders its own input from the template.
	env, err := deployer.Deploy(ctx, nil)
	if err != nil {
		return fmt.Errorf("error deploying environment: %w", err)
	}
	return writeEnvironment(cfg.environment, env)
}
// getFlags declares the CLI flags accepted by the tool. Only "template" is
// required; "kurtosis-package", "enclave" and "kurtosis-binary" carry
// defaults so a bare invocation still works.
func getFlags() []cli.Flag {
	return []cli.Flag{
		&cli.StringFlag{
			Name:     "template",
			Usage:    "Path to the template file (required)",
			Required: true,
		},
		&cli.StringFlag{
			Name:  "data",
			Usage: "Path to JSON data file (optional)",
		},
		&cli.StringFlag{
			Name:  "kurtosis-package",
			Usage: "Kurtosis package to deploy (optional)",
			Value: kurtosis.DefaultPackageName,
		},
		&cli.StringFlag{
			Name:  "enclave",
			Usage: "Enclave name (optional)",
			Value: kurtosis.DefaultEnclave,
		},
		&cli.StringFlag{
			Name:  "environment",
			Usage: "Path to JSON environment file output (optional)",
		},
		&cli.BoolFlag{
			Name:  "dry-run",
			Usage: "Dry run mode (optional)",
		},
		&cli.StringFlag{
			Name:  "kurtosis-binary",
			Usage: "Path to kurtosis binary (optional)",
			Value: "kurtosis",
		},
	}
}
// main wires the flags and action into a urfave/cli app and runs it,
// exiting non-zero (via log.Fatalf) on any error.
func main() {
	app := &cli.App{
		Name:   "kurtosis-devnet",
		Usage:  "Deploy and manage Optimism devnet using Kurtosis",
		Flags:  getFlags(),
		Action: mainAction,
	}
	if err := app.Run(os.Args); err != nil {
		log.Fatalf("Error: %v\n", err)
	}
}
package main
import (
"os"
"path/filepath"
"testing"
"github.com/exchain/go-exchain/kurtosis-devnet/pkg/kurtosis"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/urfave/cli/v2"
)
// TestParseFlags exercises newConfig through a real cli.App, checking that
// flag values and defaults land in the expected config fields and that a
// missing required template flag surfaces as an error.
func TestParseFlags(t *testing.T) {
	tests := []struct {
		name      string
		args      []string
		wantCfg   *config
		wantError bool
	}{
		{
			name: "valid configuration",
			args: []string{
				"--template", "path/to/template.yaml",
				"--enclave", "test-enclave",
			},
			wantCfg: &config{
				templateFile:    "path/to/template.yaml",
				enclave:         "test-enclave",
				kurtosisPackage: kurtosis.DefaultPackageName,
			},
			wantError: false,
		},
		{
			name:      "missing required template",
			args:      []string{"--enclave", "test-enclave"},
			wantCfg:   nil,
			wantError: true,
		},
		{
			name: "with data file",
			args: []string{
				"--template", "path/to/template.yaml",
				"--data", "path/to/data.json",
			},
			wantCfg: &config{
				templateFile:    "path/to/template.yaml",
				dataFile:        "path/to/data.json",
				enclave:         kurtosis.DefaultEnclave,
				kurtosisPackage: kurtosis.DefaultPackageName,
			},
			wantError: false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// Capture the config produced by the app action so it can be
			// asserted on after Run returns.
			var cfg *config
			app := &cli.App{
				Flags: getFlags(),
				Action: func(c *cli.Context) (err error) {
					cfg, err = newConfig(c)
					return
				},
			}
			// Prepend program name to args as urfave/cli expects
			args := append([]string{"prog"}, tt.args...)
			err := app.Run(args)
			if tt.wantError {
				assert.Error(t, err)
				return
			}
			require.NoError(t, err)
			require.NotNil(t, cfg)
			assert.Equal(t, tt.wantCfg.templateFile, cfg.templateFile)
			assert.Equal(t, tt.wantCfg.enclave, cfg.enclave)
			assert.Equal(t, tt.wantCfg.kurtosisPackage, cfg.kurtosisPackage)
			if tt.wantCfg.dataFile != "" {
				assert.Equal(t, tt.wantCfg.dataFile, cfg.dataFile)
			}
		})
	}
}
// TestMainFuncValidatesConfig runs the app end-to-end with a real template
// file on disk, verifying flag-to-config plumbing and that an environment
// output file can be produced at the configured path.
func TestMainFuncValidatesConfig(t *testing.T) {
	// Create a temporary directory for test files
	tmpDir, err := os.MkdirTemp("", "main-test")
	require.NoError(t, err)
	defer os.RemoveAll(tmpDir)
	// Create test template
	templatePath := filepath.Join(tmpDir, "template.yaml")
	err = os.WriteFile(templatePath, []byte("name: test"), 0644)
	require.NoError(t, err)
	// Create environment output path
	envPath := filepath.Join(tmpDir, "env.json")
	app := &cli.App{
		Flags: getFlags(),
		Action: func(c *cli.Context) error {
			cfg, err := newConfig(c)
			if err != nil {
				return err
			}
			// Verify config values
			assert.Equal(t, templatePath, cfg.templateFile)
			assert.Equal(t, envPath, cfg.environment)
			assert.True(t, cfg.dryRun)
			// Create an empty environment file to simulate successful deployment
			return os.WriteFile(envPath, []byte("{}"), 0644)
		},
	}
	args := []string{
		"prog",
		"--template", templatePath,
		"--environment", envPath,
		"--dry-run",
	}
	err = app.Run(args)
	require.NoError(t, err)
	// Verify the environment file was created
	assert.FileExists(t, envPath)
}
name: github.com/ethereum-optimism/optimism/kurtosis-devnet/fileserver
description: |-
Kurtosis package for serving files from the build directory
replace: {}
# Port and image constants for the static fileserver service.
FILESERVER_HTTP_PORT_ID = "http"
FILESERVER_HTTP_PORT_NUM = 80
FILESERVER_IMAGE = "nginx:latest"

def get_used_ports():
    """Returns the port map (port id -> PortSpec) exposed by the fileserver."""
    used_ports = {
        FILESERVER_HTTP_PORT_ID: PortSpec(
            number=FILESERVER_HTTP_PORT_NUM,
        )
    }
    return used_ports
def run(plan, source_path):
    """Deploys an nginx fileserver serving the contents of source_path.

    Args:
        plan: the Kurtosis plan object.
        source_path: directory (relative to the package) to serve.

    Returns:
        The name of the created service ("fileserver").
    """
    service_name = "fileserver"
    config = get_fileserver_config(
        plan,
        service_name,
        source_path,
    )
    service = plan.add_service(service_name, config)
    return service_name
def get_fileserver_config(plan, service_name, source_path):
    """Builds the ServiceConfig for the nginx fileserver.

    Uploads the content directory (mounted at /content) and the nginx
    configuration (mounted at /etc/nginx/conf.d), and exposes the HTTP port.
    """
    files = {}
    # Upload content to container
    content_artifact = plan.upload_files(
        src=source_path,
        name="{}-content".format(service_name),
    )
    files["/content"] = content_artifact
    # Add nginx config file
    nginx_conf = plan.upload_files(
        src="static_files/nginx",
        name="{}-nginx-conf".format(service_name),
    )
    files["/etc/nginx/conf.d"] = nginx_conf
    ports = get_used_ports()
    # "daemon off;" keeps nginx in the foreground so the container stays up.
    return ServiceConfig(
        image=FILESERVER_IMAGE,
        ports=ports,
        cmd=["nginx", "-g", "daemon off;"],
        files=files,
    )
server {
listen 80;
server_name _;
root /content;
location / {
try_files $uri $uri/ =404;
}
}
{
"interop": true,
"l2s": {
"2151908": {
"nodes": ["op-geth", "op-geth"]
},
"2151909": {
"nodes": ["op-reth"]
}
},
"overrides": {
"flags": {
"log_level": "--log.level=debug"
}
}
}
\ No newline at end of file
{{- $local_images := dict
"op_node" (localDockerImage "op-node")
"op_batcher" (localDockerImage "op-batcher")
"op_challenger" (localDockerImage "op-challenger")
"op_proposer" (localDockerImage "op-proposer")
"op_deployer" (localDockerImage "op-deployer")
"op_supervisor" (localDockerImage "op-supervisor")
-}}
{{- $urls := dict
"prestate" (localPrestate.URL)
"l1_artifacts" (localContractArtifacts "l1")
"l2_artifacts" (localContractArtifacts "l2")
-}}
{{- $flags := dict
"log_level" "--log.level=info"
-}}
---
optimism_package:
interop:
enabled: true
supervisor_params:
image: {{ $local_images.op_supervisor }}
dependency_set: |
{
"dependencies": {
"2151908": {
"chainIndex": "2151908",
"activationTime": 0,
"historyMinTime": 0
},
"2151909": {
"chainIndex": "2151909",
"activationTime": 0,
"historyMinTime": 0
}
}
}
extra_params:
- {{ $flags.log_level }}
chains:
- participants:
- el_type: op-geth
el_image: ""
el_log_level: ""
el_extra_env_vars: {}
el_extra_labels: {}
el_extra_params: []
el_tolerations: []
el_volume_size: 0
el_min_cpu: 0
el_max_cpu: 0
el_min_mem: 0
el_max_mem: 0
cl_type: op-node
cl_image: {{ $local_images.op_node }}
cl_log_level: ""
cl_extra_env_vars: {}
cl_extra_labels: {}
cl_extra_params: []
cl_tolerations: []
cl_volume_size: 0
cl_min_cpu: 0
cl_max_cpu: 0
cl_min_mem: 0
cl_max_mem: 0
node_selectors: {}
tolerations: []
count: 1
network_params:
network: "kurtosis"
network_id: "2151908"
seconds_per_slot: 2
name: "op-kurtosis-1"
fjord_time_offset: 0
granite_time_offset: 0
holocene_time_offset: 0
interop_time_offset: 0
fund_dev_accounts: true
batcher_params:
image: {{ $local_images.op_batcher }}
extra_params:
- {{ $flags.log_level }}
challenger_params:
image: {{ $local_images.op_challenger }}
cannon_prestate_path: ""
cannon_prestates_url: {{ $urls.prestate }}
extra_params:
- {{ $flags.log_level }}
proposer_params:
image: {{ $local_images.op_proposer }}
extra_params:
- {{ $flags.log_level }}
game_type: 1
proposal_interval: 10m
mev_params:
rollup_boost_image: ""
builder_host: ""
builder_port: ""
additional_services: []
- participants:
- el_type: op-geth
el_image: ""
el_log_level: ""
el_extra_env_vars: {}
el_extra_labels: {}
el_extra_params: []
el_tolerations: []
el_volume_size: 0
el_min_cpu: 0
el_max_cpu: 0
el_min_mem: 0
el_max_mem: 0
cl_type: op-node
cl_image: {{ $local_images.op_node }}
cl_log_level: ""
cl_extra_env_vars: {}
cl_extra_labels: {}
cl_extra_params: []
cl_tolerations: []
cl_volume_size: 0
cl_min_cpu: 0
cl_max_cpu: 0
cl_min_mem: 0
cl_max_mem: 0
node_selectors: {}
tolerations: []
count: 1
network_params:
network: "kurtosis"
network_id: "2151909"
seconds_per_slot: 2
name: "op-kurtosis-2"
fjord_time_offset: 0
granite_time_offset: 0
holocene_time_offset: 0
interop_time_offset: 0
fund_dev_accounts: true
batcher_params:
image: {{ $local_images.op_batcher }}
extra_params:
- {{ $flags.log_level }}
challenger_params:
image: {{ $local_images.op_challenger }}
cannon_prestate_path: ""
cannon_prestates_url: {{ $urls.prestate }}
extra_params:
- {{ $flags.log_level }}
proposer_params:
image: {{ $local_images.op_proposer }}
extra_params:
- {{ $flags.log_level }}
game_type: 1
proposal_interval: 10m
mev_params:
rollup_boost_image: ""
builder_host: ""
builder_port: ""
additional_services: []
op_contract_deployer_params:
image: {{ $local_images.op_deployer }}
l1_artifacts_locator: {{ $urls.l1_artifacts }}
l2_artifacts_locator: {{ $urls.l2_artifacts }}
global_deploy_overrides:
faultGameAbsolutePrestate: {{ localPrestate.Hashes.prestate }}
global_log_level: "info"
global_node_selectors: {}
global_tolerations: []
persistent: false
ethereum_package:
network_params:
preset: minimal
genesis_delay: 5
additional_preloaded_contracts: |
{
"0x4e59b44847b379578588920cA78FbF26c0B4956C": {
"balance": "0ETH",
"code": "0x7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe03601600081602082378035828234f58015156039578182fd5b8082525050506014600cf3",
"storage": {},
"nonce": "1"
}
}
set shell := ["/bin/bash", "-c"]
test:
go test --tags=testonly ./...
_kurtosis-run PACKAGE_NAME ARG_FILE ENCLAVE:
kurtosis run {{PACKAGE_NAME}} --args-file {{ARG_FILE}} --enclave {{ENCLAVE}} --show-enclave-inspect=false --image-download=missing
# Internal recipes for kurtosis-devnet
_contracts-build BUNDLE='contracts-bundle.tar.gz':
just ../packages/contracts-bedrock/forge-build
tar -czf {{BUNDLE}} -C ../packages/contracts-bedrock artifacts forge-artifacts cache
_prestate-build PATH='.':
docker buildx build --output {{PATH}} --progress plain -f ../op-program/Dockerfile.repro ../
_docker_build TAG TARGET CONTEXT DOCKERFILE *ARGS:
docker buildx build -t {{TAG}} \
-f {{CONTEXT}}/{{DOCKERFILE}} \
{{ if TARGET != '' { "--target " + TARGET } else { "" } }} \
--build-arg GIT_COMMIT={git_commit} \
--build-arg GIT_DATE={git_date} \
{{ ARGS }} \
{{CONTEXT}}
_docker_build_stack TAG TARGET *ARGS: (_docker_build TAG TARGET "../" "ops/docker/op-stack-go/Dockerfile" ARGS)
cannon-image TAG='cannon:devnet': (_docker_build_stack TAG "cannon-target")
da-server-image TAG='da-server:devnet': (_docker_build_stack TAG "da-server-target")
op-batcher-image TAG='op-batcher:devnet': (_docker_build_stack TAG "op-batcher-target")
# TODO: this is a temporary hack to get the kona version right.
# Ideally the Dockerfile should be self-sufficient (right now we depend on
# docker-bake.hcl to do the right thing).
op-challenger-image TAG='op-challenger:devnet': (_docker_build_stack TAG "op-challenger-target" "--build-arg" "KONA_VERSION=kona-client-v0.1.0-beta.6")
op-conductor-image TAG='op-conductor:devnet': (_docker_build_stack TAG "op-conductor-target")
op-deployer-image TAG='op-deployer:devnet': (_docker_build_stack TAG "op-deployer-target")
op-dispute-mon-image TAG='op-dispute-mon:devnet': (_docker_build_stack TAG "op-dispute-mon-target")
op-node-image TAG='op-node:devnet': (_docker_build_stack TAG "op-node-target")
op-program-image TAG='op-program:devnet': (_docker_build_stack TAG "op-program-target")
op-proposer-image TAG='op-proposer:devnet': (_docker_build_stack TAG "op-proposer-target")
op-supervisor-image TAG='op-supervisor:devnet': (_docker_build_stack TAG "op-supervisor-target")
op-wheel-image TAG='op-wheel:devnet': (_docker_build_stack TAG "op-wheel-target")
KURTOSIS_PACKAGE := "github.com/ethpandaops/optimism-package"
# Devnet template recipe
devnet TEMPLATE_FILE DATA_FILE="" NAME="":
#!/usr/bin/env bash
export DEVNET_NAME={{NAME}}
if [ -z "{{NAME}}" ]; then
export DEVNET_NAME=`basename {{TEMPLATE_FILE}} .yaml`
if [ -n "{{DATA_FILE}}" ]; then
export DATA_FILE_NAME=`basename {{DATA_FILE}} .json`
export DEVNET_NAME="$DEVNET_NAME-$DATA_FILE_NAME"
fi
fi
export ENCL_NAME="$DEVNET_NAME"-devnet
go run cmd/main.go -kurtosis-package {{KURTOSIS_PACKAGE}} \
-environment "tests/$ENCL_NAME.json" \
-template "{{TEMPLATE_FILE}}" \
-data "{{DATA_FILE}}" \
-enclave "$ENCL_NAME" \
&& cat "tests/$ENCL_NAME.json"
devnet-test DEVNET *TEST:
#!/usr/bin/env bash
export TESTS=({{TEST}})
# we need a timestamp in there to force kurtosis to not cache the test solely based on its name!
export ARGS=$(printf '%s\n' "${TESTS[@]}" | jq -R . | jq -s . | jq -s '{devnet: "{{DEVNET}}", timestamp: "{{datetime("%s")}}", tests: add}')
kurtosis run --enclave {{DEVNET}} \
--show-enclave-inspect=false \
./tests/ "$ARGS"
# Devnet recipes
# Mini devnet
mini-devnet: (devnet "mini.yaml")
# Simple devnet
simple-devnet: (devnet "simple.yaml")
# Interop devnet
interop-devnet: (devnet "interop.yaml")
interop-devnet-test: (devnet-test "interop-devnet" "interop-smoke-test.sh")
# User devnet
user-devnet DATA_FILE:
{{just_executable()}} devnet "user.yaml" {{DATA_FILE}} {{file_stem(DATA_FILE)}}
# Pectra devnet
pectra-devnet: (devnet "pectra.yaml")
# subshells
enter-devnet DEVNET CHAIN='Ethereum':
exec go run ../devnet-sdk/shell/cmd/enter/main.go --devnet tests/{{DEVNET}}.json --chain {{CHAIN}}
optimism_package:
chains:
- participants:
- el_type: op-geth
el_image: ""
el_log_level: ""
el_extra_env_vars: {}
el_extra_labels: {}
el_extra_params: []
el_tolerations: []
el_volume_size: 0
el_min_cpu: 0
el_max_cpu: 0
el_min_mem: 0
el_max_mem: 0
cl_type: op-node
cl_image: ""
cl_log_level: ""
cl_extra_env_vars: {}
cl_extra_labels: {}
cl_extra_params: []
cl_tolerations: []
cl_volume_size: 0
cl_min_cpu: 0
cl_max_cpu: 0
cl_min_mem: 0
cl_max_mem: 0
node_selectors: {}
tolerations: []
count: 1
network_params:
network: "kurtosis"
network_id: "2151908"
seconds_per_slot: 2
name: "op-kurtosis"
fjord_time_offset: 0
granite_time_offset: 0
holocene_time_offset: 0
fund_dev_accounts: true
batcher_params:
image: ""
extra_params: []
mev_params:
rollup_boost_image: ""
builder_host: ""
builder_port: ""
additional_services: []
op_contract_deployer_params:
image: opsigma/op-deployer:v0.0.7-http
l1_artifacts_locator: https://storage.googleapis.com/oplabs-contract-artifacts/artifacts-v1-9af7366a7102f51e8dbe451dcfa22971131d89e218915c91f420a164cc48be65.tar.gz
l2_artifacts_locator: https://storage.googleapis.com/oplabs-contract-artifacts/artifacts-v1-9af7366a7102f51e8dbe451dcfa22971131d89e218915c91f420a164cc48be65.tar.gz
global_log_level: "info"
global_node_selectors: {}
global_tolerations: []
persistent: false
ethereum_package:
network_params:
preset: minimal
genesis_delay: 5
additional_preloaded_contracts: |
{
"0x4e59b44847b379578588920cA78FbF26c0B4956C": {
"balance": "0ETH",
"code": "0x7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe03601600081602082378035828234f58015156039578182fd5b8082525050506014600cf3",
"storage": {},
"nonce": "1"
}
}
optimism_package:
chains:
- participants:
- el_type: op-geth
el_image: ""
el_log_level: ""
el_extra_env_vars: {}
el_extra_labels: {}
el_extra_params: []
el_tolerations: []
el_volume_size: 0
el_min_cpu: 0
el_max_cpu: 0
el_min_mem: 0
el_max_mem: 0
cl_type: op-node
cl_image: {{ localDockerImage "op-node" }}
cl_log_level: ""
cl_extra_env_vars: {}
cl_extra_labels: {}
cl_extra_params: []
cl_tolerations: []
cl_volume_size: 0
cl_min_cpu: 0
cl_max_cpu: 0
cl_min_mem: 0
cl_max_mem: 0
node_selectors: {}
tolerations: []
count: 1
network_params:
network: "kurtosis"
network_id: "2151908"
seconds_per_slot: 2
name: "op-kurtosis"
fjord_time_offset: 0
granite_time_offset: 0
holocene_time_offset: 0
fund_dev_accounts: true
batcher_params:
image: {{ localDockerImage "op-batcher" }}
extra_params: []
challenger_params:
image: {{ localDockerImage "op-challenger" }}
cannon_prestate_path: ""
cannon_prestates_url: "http://fileserver/proofs/op-program/cannon"
extra_params: []
proposer_params:
image: {{ localDockerImage "op-proposer" }}
extra_params: []
game_type: 1
proposal_interval: 10m
mev_params:
rollup_boost_image: ""
builder_host: ""
builder_port: ""
additional_services: []
op_contract_deployer_params:
image: {{ localDockerImage "op-deployer" }}
l1_artifacts_locator: {{ localContractArtifacts "l1" }}
l2_artifacts_locator: {{ localContractArtifacts "l2" }}
global_deploy_overrides:
faultGameAbsolutePrestate: {{ localPrestate.Hashes.prestate }}
global_log_level: "info"
global_node_selectors: {}
global_tolerations: []
persistent: false
ethereum_package:
participants_matrix:
el:
- el_type: geth
el_image: ethpandaops/geth:prague-devnet-5-f85cde7
cl:
- cl_type: lighthouse
cl_image: ethpandaops/lighthouse:single_attestation-b6d80eb
network_params:
electra_fork_epoch: 1
min_validator_withdrawability_delay: 1
shard_committee_period: 1
churn_limit_quotient: 16
genesis_delay: 5
additional_preloaded_contracts: |
{
"0x4e59b44847b379578588920cA78FbF26c0B4956C": {
"balance": "0ETH",
"code": "0x7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe03601600081602082378035828234f58015156039578182fd5b8082525050506014600cf3",
"storage": {},
"nonce": "1"
}
}
package build
import (
"bytes"
"fmt"
"log"
"os/exec"
"text/template"
)
// ContractBuilder handles building smart contracts using just commands
type ContractBuilder struct {
	// Base directory where the build commands should be executed
	baseDir string
	// Template for the build command
	cmdTemplate *template.Template
	// Dry run mode
	dryRun bool
	// Bundle paths already built in this process; used to skip repeat builds.
	builtContracts map[string]interface{}
}

const (
	// Shell command template for building a contracts bundle at BundlePath.
	contractsCmdTemplateStr = "just _contracts-build {{.BundlePath}}"
)

// defaultContractTemplate is the parsed form of contractsCmdTemplateStr.
var defaultContractTemplate *template.Template

func init() {
	defaultContractTemplate = template.Must(template.New("contract_build_cmd").Parse(contractsCmdTemplateStr))
}
// ContractBuilderOptions is a functional option for NewContractBuilder.
type ContractBuilderOptions func(*ContractBuilder)

// WithContractBaseDir sets the directory in which build commands run.
func WithContractBaseDir(baseDir string) ContractBuilderOptions {
	return func(b *ContractBuilder) {
		b.baseDir = baseDir
	}
}

// WithContractTemplate overrides the default build-command template.
func WithContractTemplate(cmdTemplate *template.Template) ContractBuilderOptions {
	return func(b *ContractBuilder) {
		b.cmdTemplate = cmdTemplate
	}
}

// WithContractDryRun toggles dry-run mode (command is rendered but not run).
func WithContractDryRun(dryRun bool) ContractBuilderOptions {
	return func(b *ContractBuilder) {
		b.dryRun = dryRun
	}
}
// NewContractBuilder creates a new ContractBuilder with sensible defaults
// (current directory, default command template, dry-run disabled) and then
// applies the supplied functional options in order.
func NewContractBuilder(opts ...ContractBuilderOptions) *ContractBuilder {
	builder := &ContractBuilder{
		baseDir:        ".",
		cmdTemplate:    defaultContractTemplate,
		dryRun:         false,
		builtContracts: make(map[string]interface{}),
	}
	for _, apply := range opts {
		apply(builder)
	}
	return builder
}
// templateData holds the data for the command template
type contractTemplateData struct {
	BundlePath string
}

// Build executes the contract build command, producing a bundle at
// bundlePath. The _layer argument is currently ignored. Builds are
// memoized per bundlePath; in dry-run mode the command is rendered but
// not executed (the path is still recorded as built).
func (b *ContractBuilder) Build(_layer string, bundlePath string) error {
	// since we ignore layer for now, we can skip the build if the file already
	// exists: it'll be the same file!
	if _, ok := b.builtContracts[bundlePath]; ok {
		return nil
	}
	log.Printf("Building contracts bundle: %s", bundlePath)
	// Prepare template data
	data := contractTemplateData{
		BundlePath: bundlePath,
	}
	// Execute template to get command string
	var cmdBuf bytes.Buffer
	if err := b.cmdTemplate.Execute(&cmdBuf, data); err != nil {
		return fmt.Errorf("failed to execute command template: %w", err)
	}
	// Create command; run through sh so the rendered string is shell-parsed.
	cmd := exec.Command("sh", "-c", cmdBuf.String())
	cmd.Dir = b.baseDir
	if !b.dryRun {
		output, err := cmd.CombinedOutput()
		if err != nil {
			return fmt.Errorf("contract build command failed: %w\nOutput: %s", err, string(output))
		}
	}
	b.builtContracts[bundlePath] = struct{}{}
	return nil
}
package build
import (
"bytes"
"fmt"
"log"
"os/exec"
"text/template"
)
// DockerBuilder handles building docker images using just commands
type DockerBuilder struct {
	// Base directory where the build commands should be executed
	baseDir string
	// Template for the build command
	cmdTemplate *template.Template
	// Dry run mode
	dryRun bool
	// Project name -> image tag for images already built; used for memoization.
	builtImages map[string]string
}

// cmdTemplateStr is the shell command template invoking the per-project
// "<project>-image <tag>" just recipe.
const cmdTemplateStr = "just {{.ProjectName}}-image {{.ImageTag}}"

// defaultCmdTemplate is the parsed form of cmdTemplateStr.
var defaultCmdTemplate *template.Template

func init() {
	defaultCmdTemplate = template.Must(template.New("docker_build_cmd").Parse(cmdTemplateStr))
}
// DockerBuilderOptions is a functional option for NewDockerBuilder.
type DockerBuilderOptions func(*DockerBuilder)

// WithDockerCmdTemplate overrides the default build-command template.
func WithDockerCmdTemplate(cmdTemplate *template.Template) DockerBuilderOptions {
	return func(b *DockerBuilder) {
		b.cmdTemplate = cmdTemplate
	}
}

// WithDockerBaseDir sets the directory in which build commands run.
func WithDockerBaseDir(baseDir string) DockerBuilderOptions {
	return func(b *DockerBuilder) {
		b.baseDir = baseDir
	}
}

// WithDockerDryRun toggles dry-run mode (command is rendered but not run).
func WithDockerDryRun(dryRun bool) DockerBuilderOptions {
	return func(b *DockerBuilder) {
		b.dryRun = dryRun
	}
}
// NewDockerBuilder creates a new DockerBuilder with sensible defaults
// (current directory, default command template, dry-run disabled) and then
// applies the supplied functional options in order.
func NewDockerBuilder(opts ...DockerBuilderOptions) *DockerBuilder {
	builder := &DockerBuilder{
		baseDir:     ".",
		cmdTemplate: defaultCmdTemplate,
		dryRun:      false,
		builtImages: make(map[string]string),
	}
	for _, apply := range opts {
		apply(builder)
	}
	return builder
}
// templateData holds the data for the command template
type templateData struct {
	ImageTag    string
	ProjectName string
}

// Build executes the docker build command for the given project and image tag.
// Builds are memoized per projectName (the first tag built for a project is
// returned on subsequent calls); in dry-run mode the command is rendered but
// not executed. Returns the image tag on success.
func (b *DockerBuilder) Build(projectName, imageTag string) (string, error) {
	if builtImage, ok := b.builtImages[projectName]; ok {
		return builtImage, nil
	}
	log.Printf("Building docker image for project: %s with tag: %s", projectName, imageTag)
	// Prepare template data
	data := templateData{
		ImageTag:    imageTag,
		ProjectName: projectName,
	}
	// Execute template to get command string
	var cmdBuf bytes.Buffer
	if err := b.cmdTemplate.Execute(&cmdBuf, data); err != nil {
		return "", fmt.Errorf("failed to execute command template: %w", err)
	}
	// Create command; run through sh so the rendered string is shell-parsed.
	cmd := exec.Command("sh", "-c", cmdBuf.String())
	cmd.Dir = b.baseDir
	if !b.dryRun {
		output, err := cmd.CombinedOutput()
		if err != nil {
			return "", fmt.Errorf("build command failed: %w\nOutput: %s", err, string(output))
		}
	}
	// Return the image tag as confirmation of successful build
	b.builtImages[projectName] = imageTag
	return imageTag, nil
}
package build
import (
"bytes"
"fmt"
"log"
"os/exec"
"text/template"
)
// PrestateBuilder handles building prestates using just commands
type PrestateBuilder struct {
	// Base directory where the build commands should be executed.
	baseDir string
	// Template for the build command.
	cmdTemplate *template.Template
	// Dry run mode: render the command but do not execute it.
	dryRun bool
	// Output paths already built; used to skip repeat builds.
	builtPrestates map[string]interface{}
}

const (
	// Shell command template for building a prestate into the given path.
	prestateCmdTemplateStr = "just _prestate-build {{.Path}}"
)

// defaultPrestateTemplate is the parsed form of prestateCmdTemplateStr.
var defaultPrestateTemplate *template.Template

func init() {
	defaultPrestateTemplate = template.Must(template.New("prestate_build_cmd").Parse(prestateCmdTemplateStr))
}
// PrestateBuilderOptions is a functional option for NewPrestateBuilder.
type PrestateBuilderOptions func(*PrestateBuilder)

// WithPrestateBaseDir sets the directory in which build commands run.
func WithPrestateBaseDir(baseDir string) PrestateBuilderOptions {
	return func(b *PrestateBuilder) {
		b.baseDir = baseDir
	}
}

// WithPrestateTemplate overrides the default build-command template.
func WithPrestateTemplate(cmdTemplate *template.Template) PrestateBuilderOptions {
	return func(b *PrestateBuilder) {
		b.cmdTemplate = cmdTemplate
	}
}

// WithPrestateDryRun toggles dry-run mode (command is rendered but not run).
func WithPrestateDryRun(dryRun bool) PrestateBuilderOptions {
	return func(b *PrestateBuilder) {
		b.dryRun = dryRun
	}
}
// NewPrestateBuilder creates a new PrestateBuilder with sensible defaults
// (current directory, default command template, dry-run disabled) and then
// applies the supplied functional options in order.
func NewPrestateBuilder(opts ...PrestateBuilderOptions) *PrestateBuilder {
	builder := &PrestateBuilder{
		baseDir:        ".",
		cmdTemplate:    defaultPrestateTemplate,
		dryRun:         false,
		builtPrestates: make(map[string]interface{}),
	}
	for _, apply := range opts {
		apply(builder)
	}
	return builder
}
// templateData holds the data for the command template
type prestateTemplateData struct {
	Path string
}

// Build executes the prestate build command for the given output path.
// Builds are memoized per path; in dry-run mode the command is rendered
// but not executed (the path is still recorded as built).
func (b *PrestateBuilder) Build(path string) error {
	if _, ok := b.builtPrestates[path]; ok {
		return nil
	}
	log.Printf("Building prestate: %s", path)
	// Prepare template data
	data := prestateTemplateData{
		Path: path,
	}
	// Execute template to get command string
	var cmdBuf bytes.Buffer
	if err := b.cmdTemplate.Execute(&cmdBuf, data); err != nil {
		return fmt.Errorf("failed to execute command template: %w", err)
	}
	// Create command; run through sh so the rendered string is shell-parsed.
	cmd := exec.Command("sh", "-c", cmdBuf.String())
	cmd.Dir = b.baseDir
	if !b.dryRun {
		output, err := cmd.CombinedOutput()
		if err != nil {
			return fmt.Errorf("prestate build command failed: %w\nOutput: %s", err, string(output))
		}
	}
	b.builtPrestates[path] = struct{}{}
	return nil
}
package deploy
import (
"bytes"
"context"
"fmt"
"io"
"log"
"os"
"github.com/exchain/go-exchain/kurtosis-devnet/pkg/kurtosis"
"github.com/exchain/go-exchain/kurtosis-devnet/pkg/kurtosis/api/engine"
"github.com/exchain/go-exchain/kurtosis-devnet/pkg/kurtosis/sources/spec"
)
// EngineManager abstracts over the kurtosis engine lifecycle so it can be
// mocked in tests.
type EngineManager interface {
	EnsureRunning() error
}

// deployer is the subset of the kurtosis deployer used here; kept as an
// interface so tests can substitute a mock.
type deployer interface {
	Deploy(ctx context.Context, input io.Reader) (*spec.EnclaveSpec, error)
	GetEnvironmentInfo(ctx context.Context, spec *spec.EnclaveSpec) (*kurtosis.KurtosisEnvironment, error)
}

// DeployerFunc constructs a deployer from kurtosis options; injectable for tests.
type DeployerFunc func(opts ...kurtosis.KurtosisDeployerOptions) (deployer, error)

// DeployerOption is a functional option for NewDeployer.
type DeployerOption func(*Deployer)

// Deployer orchestrates rendering a devnet template and deploying the
// resulting specification into a kurtosis enclave.
type Deployer struct {
	baseDir        string       // directory containing the template and packages
	dryRun         bool         // when true, skip engine checks and real deploys
	kurtosisPkg    string       // kurtosis package name to deploy
	enclave        string       // target enclave name
	kurtosisBinary string       // path to the kurtosis binary
	ktDeployer     DeployerFunc // factory for the underlying kurtosis deployer
	engineManager  EngineManager
	templateFile   string // template to render into the deployment input
	dataFile       string // optional data file for template interpolation
}
// WithKurtosisDeployer injects a custom deployer factory (used by tests).
func WithKurtosisDeployer(ktDeployer DeployerFunc) DeployerOption {
	return func(d *Deployer) {
		d.ktDeployer = ktDeployer
	}
}

// WithEngineManager injects a custom engine manager (used by tests).
func WithEngineManager(engineManager EngineManager) DeployerOption {
	return func(d *Deployer) {
		d.engineManager = engineManager
	}
}

// WithKurtosisBinary sets the path to the kurtosis binary.
func WithKurtosisBinary(kurtosisBinary string) DeployerOption {
	return func(d *Deployer) {
		d.kurtosisBinary = kurtosisBinary
	}
}

// WithKurtosisPackage sets the kurtosis package to deploy.
func WithKurtosisPackage(kurtosisPkg string) DeployerOption {
	return func(d *Deployer) {
		d.kurtosisPkg = kurtosisPkg
	}
}

// WithTemplateFile sets the template file to render.
func WithTemplateFile(templateFile string) DeployerOption {
	return func(d *Deployer) {
		d.templateFile = templateFile
	}
}

// WithDataFile sets the optional data file for template interpolation.
func WithDataFile(dataFile string) DeployerOption {
	return func(d *Deployer) {
		d.dataFile = dataFile
	}
}

// WithBaseDir sets the base directory for rendering and deployment.
func WithBaseDir(baseDir string) DeployerOption {
	return func(d *Deployer) {
		d.baseDir = baseDir
	}
}

// WithDryRun toggles dry-run mode.
func WithDryRun(dryRun bool) DeployerOption {
	return func(d *Deployer) {
		d.dryRun = dryRun
	}
}

// WithEnclave sets the target enclave name.
func WithEnclave(enclave string) DeployerOption {
	return func(d *Deployer) {
		d.enclave = enclave
	}
}
// NewDeployer constructs a Deployer, applying the given options over the
// defaults ("kurtosis" binary, real kurtosis deployer factory). The default
// engine manager is built after options are applied so it picks up a
// kurtosis binary set via WithKurtosisBinary.
func NewDeployer(opts ...DeployerOption) *Deployer {
	d := &Deployer{
		kurtosisBinary: "kurtosis",
		ktDeployer: func(opts ...kurtosis.KurtosisDeployerOptions) (deployer, error) {
			return kurtosis.NewKurtosisDeployer(opts...)
		},
	}
	for _, opt := range opts {
		opt(d)
	}
	if d.engineManager == nil {
		d.engineManager = engine.NewEngineManager(engine.WithKurtosisBinary(d.kurtosisBinary))
	}
	return d
}
// deployEnvironment echoes the deployment input to stdout (for operator
// visibility) while buffering it, deploys the kurtosis package with that
// buffered input, and returns the resulting environment info.
func (d *Deployer) deployEnvironment(ctx context.Context, r io.Reader) (*kurtosis.KurtosisEnvironment, error) {
	// Create a multi reader to output deployment input to stdout
	buf := bytes.NewBuffer(nil)
	tee := io.TeeReader(r, buf)
	// Log the deployment input. Copying through the tee also fills buf,
	// which is what actually gets handed to the deployer below.
	log.Println("Deployment input:")
	if _, err := io.Copy(os.Stdout, tee); err != nil {
		return nil, fmt.Errorf("error copying deployment input: %w", err)
	}
	opts := []kurtosis.KurtosisDeployerOptions{
		kurtosis.WithKurtosisBaseDir(d.baseDir),
		kurtosis.WithKurtosisDryRun(d.dryRun),
		kurtosis.WithKurtosisPackageName(d.kurtosisPkg),
		kurtosis.WithKurtosisEnclave(d.enclave),
	}
	ktd, err := d.ktDeployer(opts...)
	if err != nil {
		return nil, fmt.Errorf("error creating kurtosis deployer: %w", err)
	}
	spec, err := ktd.Deploy(ctx, buf)
	if err != nil {
		return nil, fmt.Errorf("error deploying kurtosis package: %w", err)
	}
	return ktd.GetEnvironmentInfo(ctx, spec)
}
// renderTemplate renders the deployer's template/data files into a buffer,
// using buildDir for build artifacts and urlBuilder to construct URLs that
// point at the fileserver. Delegates to Templater.
func (d *Deployer) renderTemplate(buildDir string, urlBuilder func(path ...string) string) (*bytes.Buffer, error) {
	t := &Templater{
		baseDir:      d.baseDir,
		dryRun:       d.dryRun,
		enclave:      d.enclave,
		templateFile: d.templateFile,
		dataFile:     d.dataFile,
		buildDir:     buildDir,
		urlBuilder:   urlBuilder,
	}
	return t.Render()
}
// Deploy runs the full deployment pipeline: ensure the kurtosis engine is
// up (skipped in dry-run), render the template into a temp build dir,
// publish that dir via the fileserver, then deploy the rendered spec.
// The r argument is currently unused; input comes from the rendered template.
func (d *Deployer) Deploy(ctx context.Context, r io.Reader) (*kurtosis.KurtosisEnvironment, error) {
	if !d.dryRun {
		if err := d.engineManager.EnsureRunning(); err != nil {
			return nil, fmt.Errorf("error ensuring kurtosis engine is running: %w", err)
		}
	}
	// Temp dir collects build outputs referenced by the rendered template.
	tmpDir, err := os.MkdirTemp("", d.enclave)
	if err != nil {
		return nil, fmt.Errorf("error creating temporary directory: %w", err)
	}
	defer os.RemoveAll(tmpDir)
	srv := &FileServer{
		baseDir:  d.baseDir,
		dryRun:   d.dryRun,
		enclave:  d.enclave,
		deployer: d.ktDeployer,
	}
	// Render first so tmpDir is populated before the fileserver uploads it.
	buf, err := d.renderTemplate(tmpDir, srv.URL)
	if err != nil {
		return nil, fmt.Errorf("error rendering template: %w", err)
	}
	if err := srv.Deploy(ctx, tmpDir); err != nil {
		return nil, fmt.Errorf("error deploying fileserver: %w", err)
	}
	return d.deployEnvironment(ctx, buf)
}
package deploy
import (
"bytes"
"context"
"encoding/json"
"io"
"os"
"path/filepath"
"testing"
"github.com/exchain/go-exchain/kurtosis-devnet/pkg/kurtosis"
"github.com/exchain/go-exchain/kurtosis-devnet/pkg/kurtosis/sources/spec"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// mockDeployerForTest implements the deployer interface for testing
type mockDeployerForTest struct {
	baseDir string // directory in which the mock writes its env.json
}

// Deploy writes a mock env.json into baseDir instead of deploying anything,
// so the test can assert on the file's presence and content.
func (m *mockDeployerForTest) Deploy(ctx context.Context, input io.Reader) (*spec.EnclaveSpec, error) {
	// Create a mock env.json file
	envPath := filepath.Join(m.baseDir, "env.json")
	mockEnv := map[string]interface{}{
		"test": "value",
	}
	data, err := json.Marshal(mockEnv)
	if err != nil {
		return nil, err
	}
	if err := os.WriteFile(envPath, data, 0644); err != nil {
		return nil, err
	}
	return &spec.EnclaveSpec{}, nil
}

// GetEnvironmentInfo returns an empty environment; the test only needs a
// non-nil result.
func (m *mockDeployerForTest) GetEnvironmentInfo(ctx context.Context, spec *spec.EnclaveSpec) (*kurtosis.KurtosisEnvironment, error) {
	return &kurtosis.KurtosisEnvironment{}, nil
}
// TestDeploy drives Deployer.Deploy end-to-end in dry-run mode with a mock
// kurtosis deployer, checking that the template/data plumbing works and the
// mock's env.json output lands on disk with the expected content.
func TestDeploy(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	// Create a temporary directory for the environment output
	tmpDir, err := os.MkdirTemp("", "deploy-test")
	require.NoError(t, err)
	defer os.RemoveAll(tmpDir)
	// Create a simple template file
	templatePath := filepath.Join(tmpDir, "template.yaml")
	err = os.WriteFile(templatePath, []byte("test: {{ .Config }}"), 0644)
	require.NoError(t, err)
	// Create a simple data file
	dataPath := filepath.Join(tmpDir, "data.json")
	err = os.WriteFile(dataPath, []byte(`{"Config": "value"}`), 0644)
	require.NoError(t, err)
	envPath := filepath.Join(tmpDir, "env.json")
	// Create a simple deployment configuration
	deployConfig := bytes.NewBufferString(`{"test": "config"}`)
	// Create a mock deployer function
	mockDeployerFunc := func(opts ...kurtosis.KurtosisDeployerOptions) (deployer, error) {
		return &mockDeployerForTest{baseDir: tmpDir}, nil
	}
	d := NewDeployer(
		WithBaseDir(tmpDir),
		WithKurtosisDeployer(mockDeployerFunc),
		WithDryRun(true),
		WithTemplateFile(templatePath),
		WithDataFile(dataPath),
	)
	env, err := d.Deploy(ctx, deployConfig)
	require.NoError(t, err)
	require.NotNil(t, env)
	// Verify the environment file was created
	assert.FileExists(t, envPath)
	// Read and verify the content
	content, err := os.ReadFile(envPath)
	require.NoError(t, err)
	var envData map[string]interface{}
	err = json.Unmarshal(content, &envData)
	require.NoError(t, err)
	assert.Equal(t, "value", envData["test"])
}
package deploy
import (
"bytes"
"context"
"fmt"
"os"
"path/filepath"
"strings"
"github.com/exchain/go-exchain/kurtosis-devnet/pkg/kurtosis"
"github.com/exchain/go-exchain/kurtosis-devnet/pkg/util"
)
// FILESERVER_PACKAGE is the kurtosis package name used to deploy the static file server.
const FILESERVER_PACKAGE = "fileserver"

// FileServer deploys a static file server into an enclave via a kurtosis deployer.
type FileServer struct {
	baseDir  string       // root directory containing the fileserver package
	enclave  string       // target enclave name
	dryRun   bool         // when true, the deployment is only simulated
	deployer DeployerFunc // factory producing the kurtosis deployer
}
// URL returns the HTTP URL under which the given path segments are served
// by the file server.
func (f *FileServer) URL(path ...string) string {
	joined := strings.Join(path, "/")
	return "http://" + FILESERVER_PACKAGE + "/" + joined
}
// Deploy copies the contents of sourceDir into a temporary directory inside
// the fileserver kurtosis package, then runs that package in the configured
// enclave so the files become available over HTTP. The temporary directory
// is removed when the call returns.
func (f *FileServer) Deploy(ctx context.Context, sourceDir string) error {
	// Create a temp dir in the fileserver package
	baseDir := filepath.Join(f.baseDir, FILESERVER_PACKAGE)
	if err := os.MkdirAll(baseDir, 0755); err != nil {
		// Fixed copy-paste error: this is the fileserver dir, not "nebula".
		return fmt.Errorf("error creating fileserver directory: %w", err)
	}
	tempDir, err := os.MkdirTemp(baseDir, "upload-content")
	if err != nil {
		return fmt.Errorf("error creating temporary directory: %w", err)
	}
	defer os.RemoveAll(tempDir)
	// Copy build dir contents to tempDir
	if err := util.CopyDir(sourceDir, tempDir); err != nil {
		return fmt.Errorf("error copying directory: %w", err)
	}
	// The package only needs the path relative to its own directory.
	buf := bytes.NewBuffer(nil)
	buf.WriteString(fmt.Sprintf("source_path: %s\n", filepath.Base(tempDir)))
	opts := []kurtosis.KurtosisDeployerOptions{
		kurtosis.WithKurtosisBaseDir(f.baseDir),
		kurtosis.WithKurtosisDryRun(f.dryRun),
		kurtosis.WithKurtosisPackageName(FILESERVER_PACKAGE),
		kurtosis.WithKurtosisEnclave(f.enclave),
	}
	d, err := f.deployer(opts...)
	if err != nil {
		return fmt.Errorf("error creating kurtosis deployer: %w", err)
	}
	_, err = d.Deploy(ctx, buf)
	if err != nil {
		return fmt.Errorf("error deploying kurtosis package: %w", err)
	}
	return nil
}
package deploy
import (
"context"
"io"
"os"
"path/filepath"
"testing"
"github.com/exchain/go-exchain/kurtosis-devnet/pkg/kurtosis"
"github.com/exchain/go-exchain/kurtosis-devnet/pkg/kurtosis/sources/spec"
"github.com/stretchr/testify/require"
)
// TestDeployFileserver runs table-driven cases of FileServer.Deploy with a
// mock deployer in dry-run mode, asserting only on whether Deploy errors.
func TestDeployFileserver(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	tmpDir, err := os.MkdirTemp("", "deploy-fileserver-test")
	require.NoError(t, err)
	defer os.RemoveAll(tmpDir)
	// Create a mock deployer function
	mockDeployerFunc := func(opts ...kurtosis.KurtosisDeployerOptions) (deployer, error) {
		return &mockDeployer{}, nil
	}
	testCases := []struct {
		name        string
		fs          *FileServer
		shouldError bool
	}{
		{
			name: "successful deployment",
			fs: &FileServer{
				baseDir:  tmpDir,
				enclave:  "test-enclave",
				dryRun:   true,
				deployer: mockDeployerFunc,
			},
			shouldError: false,
		},
	}
	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			// NOTE(review): the source dir may not exist yet; relies on
			// util.CopyDir tolerating that — confirm if cases are added.
			err := tc.fs.Deploy(ctx, filepath.Join(tmpDir, "fileserver"))
			if tc.shouldError {
				require.Error(t, err)
			} else {
				require.NoError(t, err)
			}
		})
	}
}
// mockDeployer implements the deployer interface for testing
type mockDeployer struct{}

// Deploy ignores its input and returns an empty spec with no side effects.
func (m *mockDeployer) Deploy(ctx context.Context, input io.Reader) (*spec.EnclaveSpec, error) {
	return &spec.EnclaveSpec{}, nil
}

// GetEnvironmentInfo returns an empty environment and never fails.
func (m *mockDeployer) GetEnvironmentInfo(ctx context.Context, spec *spec.EnclaveSpec) (*kurtosis.KurtosisEnvironment, error) {
	return &kurtosis.KurtosisEnvironment{}, nil
}
package deploy
import (
"encoding/json"
"fmt"
"log"
"os"
"path/filepath"
"strings"
"github.com/exchain/go-exchain/kurtosis-devnet/pkg/build"
)
// PrestateInfo describes where built prestate artifacts are served from and
// the hash recorded for each known prestate flavor.
type PrestateInfo struct {
	URL    string            `json:"url"`
	Hashes map[string]string `json:"hashes"`
}

// localPrestateHolder builds the local prestates at most once and caches
// the resulting PrestateInfo for subsequent calls.
type localPrestateHolder struct {
	info       *PrestateInfo // cached result; nil until GetPrestateInfo succeeds
	baseDir    string
	buildDir   string
	dryRun     bool // when true, skip the actual build (Hashes stays empty)
	builder    *build.PrestateBuilder
	urlBuilder func(path ...string) string
}
// GetPrestateInfo builds the local prestate artifacts (once) into
// <buildDir>/proofs/op-program/cannon, renames each proof json and its
// companion .bin.gz to hash-based names, and returns the serving URL plus a
// map of known prestate flavors to their hashes. The result is cached; in
// dry-run mode no build happens and Hashes stays empty.
func (h *localPrestateHolder) GetPrestateInfo() (*PrestateInfo, error) {
	if h.info != nil {
		return h.info, nil
	}
	prestatePath := []string{"proofs", "op-program", "cannon"}
	prestateURL := h.urlBuilder(prestatePath...)
	// Create build directory with the final path structure
	buildDir := filepath.Join(append([]string{h.buildDir}, prestatePath...)...)
	if err := os.MkdirAll(buildDir, 0755); err != nil {
		return nil, fmt.Errorf("failed to create prestate build directory: %w", err)
	}
	info := &PrestateInfo{
		URL:    prestateURL,
		Hashes: make(map[string]string),
	}
	if h.dryRun {
		h.info = info
		return info, nil
	}
	// Map of known file prefixes to their keys
	fileToKey := map[string]string{
		"prestate-proof.json":         "prestate",
		"prestate-proof-mt64.json":    "prestate_mt64",
		"prestate-proof-mt.json":      "prestate_mt",
		"prestate-proof-interop.json": "prestate_interop",
	}
	// Build all prestate files directly in the target directory
	if err := h.builder.Build(buildDir); err != nil {
		return nil, fmt.Errorf("failed to build prestates: %w", err)
	}
	// Find and process all prestate files
	matches, err := filepath.Glob(filepath.Join(buildDir, "prestate-proof*.json"))
	if err != nil {
		return nil, fmt.Errorf("failed to find prestate files: %w", err)
	}
	// Process each file to rename it to its hash
	for _, filePath := range matches {
		content, err := os.ReadFile(filePath)
		if err != nil {
			return nil, fmt.Errorf("failed to read prestate %s: %w", filepath.Base(filePath), err)
		}
		// Only the "pre" hash is needed from each proof file.
		var data struct {
			Pre string `json:"pre"`
		}
		if err := json.Unmarshal(content, &data); err != nil {
			return nil, fmt.Errorf("failed to parse prestate %s: %w", filepath.Base(filePath), err)
		}
		// Store hash with its corresponding key; files not listed in
		// fileToKey are still renamed below but not recorded in Hashes.
		if key, exists := fileToKey[filepath.Base(filePath)]; exists {
			info.Hashes[key] = data.Pre
		}
		// Rename files to hash-based names
		newFileName := data.Pre + ".json"
		hashedPath := filepath.Join(buildDir, newFileName)
		if err := os.Rename(filePath, hashedPath); err != nil {
			return nil, fmt.Errorf("failed to rename prestate %s: %w", filepath.Base(filePath), err)
		}
		log.Printf("%s available at: %s/%s\n", filepath.Base(filePath), prestateURL, newFileName)
		// Rename the corresponding binary file, whose name is derived by
		// dropping "-proof" and swapping .json for .bin.gz.
		binFilePath := strings.Replace(strings.TrimSuffix(filePath, ".json"), "-proof", "", 1) + ".bin.gz"
		newBinFileName := data.Pre + ".bin.gz"
		binHashedPath := filepath.Join(buildDir, newBinFileName)
		if err := os.Rename(binFilePath, binHashedPath); err != nil {
			return nil, fmt.Errorf("failed to rename prestate %s: %w", filepath.Base(binFilePath), err)
		}
		log.Printf("%s available at: %s/%s\n", filepath.Base(binFilePath), prestateURL, newBinFileName)
	}
	h.info = info
	return info, nil
}
package deploy
import (
"bytes"
"os"
"path/filepath"
"strings"
"testing"
"github.com/exchain/go-exchain/kurtosis-devnet/pkg/tmpl"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"gopkg.in/yaml.v2"
)
// TestLocalPrestate renders a template that calls localPrestate twice and
// verifies the URL points at the static prestate path, both calls yield
// identical results (the holder caches), and the target dir is created.
func TestLocalPrestate(t *testing.T) {
	tests := []struct {
		name    string
		dryRun  bool
		wantErr bool
	}{
		{
			name:    "dry run mode",
			dryRun:  true,
			wantErr: false,
		},
		{
			name:    "normal mode",
			dryRun:  false,
			wantErr: false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			tmpDir, err := os.MkdirTemp("", "prestate-test")
			require.NoError(t, err)
			defer os.RemoveAll(tmpDir)
			// Create a mock justfile for each test case
			err = os.WriteFile(filepath.Join(tmpDir, "justfile"), []byte(`
_prestate-build target:
	@echo "Mock prestate build"
`), 0644)
			require.NoError(t, err)
			templater := &Templater{
				baseDir:  tmpDir,
				dryRun:   tt.dryRun,
				buildDir: tmpDir,
				urlBuilder: func(path ...string) string {
					return "http://fileserver/" + strings.Join(path, "/")
				},
			}
			// Create template context with just the prestate function
			tmplCtx := tmpl.NewTemplateContext(templater.localPrestateOption())
			// Test template with multiple calls to localPrestate
			template := `first:
  url: {{(localPrestate).URL}}
  hashes:
    game: {{index (localPrestate).Hashes "game"}}
    proof: {{index (localPrestate).Hashes "proof"}}
second:
  url: {{(localPrestate).URL}}
  hashes:
    game: {{index (localPrestate).Hashes "game"}}
    proof: {{index (localPrestate).Hashes "proof"}}`
			buf := bytes.NewBuffer(nil)
			err = tmplCtx.InstantiateTemplate(bytes.NewBufferString(template), buf)
			if tt.wantErr {
				assert.Error(t, err)
				return
			}
			require.NoError(t, err)
			// Verify the output is valid YAML and contains the static path
			output := buf.String()
			assert.Contains(t, output, "url: http://fileserver/proofs/op-program/cannon")
			// Verify both calls return the same values
			var result struct {
				First struct {
					URL    string            `yaml:"url"`
					Hashes map[string]string `yaml:"hashes"`
				} `yaml:"first"`
				Second struct {
					URL    string            `yaml:"url"`
					Hashes map[string]string `yaml:"hashes"`
				} `yaml:"second"`
			}
			err = yaml.Unmarshal(buf.Bytes(), &result)
			require.NoError(t, err)
			// Check that both calls returned identical results
			assert.Equal(t, result.First.URL, result.Second.URL, "URLs should match")
			assert.Equal(t, result.First.Hashes, result.Second.Hashes, "Hashes should match")
			// Verify the directory was created only once
			prestateDir := filepath.Join(tmpDir, "proofs", "op-program", "cannon")
			assert.DirExists(t, prestateDir)
		})
	}
}
package deploy
import (
"bytes"
"encoding/json"
"fmt"
"log"
"os"
"path/filepath"
"github.com/exchain/go-exchain/kurtosis-devnet/pkg/build"
"github.com/exchain/go-exchain/kurtosis-devnet/pkg/tmpl"
)
// Templater renders the deployment template, exposing helper functions for
// locally-built docker images, contract artifacts, and prestates.
type Templater struct {
	enclave      string // enclave name, used to tag built artifacts
	dryRun       bool   // propagated to all builders
	baseDir      string
	templateFile string
	dataFile     string // optional JSON file providing template data
	buildDir     string
	urlBuilder   func(path ...string) string // maps artifact paths to serving URLs
}
// localDockerImageOption exposes the "localDockerImage" template function,
// which builds a docker image for the named project (tagged with the
// enclave name) and returns the resulting tag.
func (f *Templater) localDockerImageOption() tmpl.TemplateContextOptions {
	builder := build.NewDockerBuilder(
		build.WithDockerBaseDir(f.baseDir),
		build.WithDockerDryRun(f.dryRun),
	)
	return tmpl.WithFunction("localDockerImage", func(projectName string) (string, error) {
		tag := fmt.Sprintf("%s:%s", projectName, f.enclave)
		return builder.Build(projectName, tag)
	})
}
// localContractArtifactsOption exposes the "localContractArtifacts" template
// function, which builds the contract bundle for a layer into buildDir and
// returns the URL it will be served from.
func (f *Templater) localContractArtifactsOption() tmpl.TemplateContextOptions {
	contractsBundle := fmt.Sprintf("contracts-bundle-%s.tar.gz", f.enclave)
	// The bundle path is layer-independent; the layer argument is ignored.
	contractsBundlePath := func(_ string) string {
		return filepath.Join(f.buildDir, contractsBundle)
	}
	contractsURL := f.urlBuilder(contractsBundle)
	contractBuilder := build.NewContractBuilder(
		build.WithContractBaseDir(f.baseDir),
		build.WithContractDryRun(f.dryRun),
	)
	return tmpl.WithFunction("localContractArtifacts", func(layer string) (string, error) {
		bundlePath := contractsBundlePath(layer)
		if err := contractBuilder.Build(layer, bundlePath); err != nil {
			return "", err
		}
		log.Printf("%s: contract artifacts available at: %s\n", layer, contractsURL)
		return contractsURL, nil
	})
}
// localPrestateOption exposes the "localPrestate" template function, which
// builds the local prestate artifacts on first use (cached thereafter) and
// returns their URL and hashes.
func (f *Templater) localPrestateOption() tmpl.TemplateContextOptions {
	prestateBuilder := build.NewPrestateBuilder(
		build.WithPrestateBaseDir(f.baseDir),
		build.WithPrestateDryRun(f.dryRun),
	)
	holder := &localPrestateHolder{
		baseDir:    f.baseDir,
		buildDir:   f.buildDir,
		dryRun:     f.dryRun,
		builder:    prestateBuilder,
		urlBuilder: f.urlBuilder,
	}
	// The method value already has the required func() (*PrestateInfo, error) shape.
	return tmpl.WithFunction("localPrestate", holder.GetPrestateInfo)
}
// Render instantiates the template file with the local-build helper
// functions and, when a data file is configured, the parsed JSON data.
// It returns the rendered output as a buffer.
func (f *Templater) Render() (*bytes.Buffer, error) {
	opts := []tmpl.TemplateContextOptions{
		f.localDockerImageOption(),
		f.localContractArtifactsOption(),
		f.localPrestateOption(),
		tmpl.WithBaseDir(f.baseDir),
	}
	// Read and parse the data file if provided
	if f.dataFile != "" {
		data, err := os.ReadFile(f.dataFile)
		if err != nil {
			return nil, fmt.Errorf("error reading data file: %w", err)
		}
		var templateData map[string]interface{}
		if err := json.Unmarshal(data, &templateData); err != nil {
			return nil, fmt.Errorf("error parsing JSON data: %w", err)
		}
		opts = append(opts, tmpl.WithData(templateData))
	}
	// Open template file
	tmplFile, err := os.Open(f.templateFile)
	if err != nil {
		return nil, fmt.Errorf("error opening template file: %w", err)
	}
	defer tmplFile.Close()
	// Create template context
	tmplCtx := tmpl.NewTemplateContext(opts...)
	// Process template
	buf := bytes.NewBuffer(nil)
	if err := tmplCtx.InstantiateTemplate(tmplFile, buf); err != nil {
		return nil, fmt.Errorf("error processing template: %w", err)
	}
	return buf, nil
}
package deploy
import (
"os"
"path/filepath"
"strings"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestRenderTemplate renders a template using data-file substitution and the
// localDockerImage/localContractArtifacts helpers in dry-run mode, then
// checks the substituted name and image tag appear in the output.
func TestRenderTemplate(t *testing.T) {
	// Create a temporary directory for test files
	tmpDir, err := os.MkdirTemp("", "template-test")
	require.NoError(t, err)
	defer os.RemoveAll(tmpDir)
	// Create a test template file
	templateContent := `
name: {{.name}}
image: {{localDockerImage "test-project"}}
artifacts: {{localContractArtifacts "l1"}}`
	templatePath := filepath.Join(tmpDir, "template.yaml")
	err = os.WriteFile(templatePath, []byte(templateContent), 0644)
	require.NoError(t, err)
	// Create a test data file
	dataContent := `{"name": "test-deployment"}`
	dataPath := filepath.Join(tmpDir, "data.json")
	err = os.WriteFile(dataPath, []byte(dataContent), 0644)
	require.NoError(t, err)
	// Create a Templater instance
	templater := &Templater{
		enclave:      "test-enclave",
		dryRun:       true,
		baseDir:      tmpDir,
		templateFile: templatePath,
		dataFile:     dataPath,
		buildDir:     tmpDir,
		urlBuilder: func(path ...string) string {
			return "http://localhost:8080/" + strings.Join(path, "/")
		},
	}
	buf, err := templater.Render()
	require.NoError(t, err)
	// Verify template rendering
	assert.Contains(t, buf.String(), "test-deployment")
	assert.Contains(t, buf.String(), "test-project:test-enclave")
}
package kurtosis
import (
"context"
"io"
"github.com/exchain/go-exchain/kurtosis-devnet/pkg/kurtosis/sources/deployer"
"github.com/exchain/go-exchain/kurtosis-devnet/pkg/kurtosis/sources/inspect"
"github.com/exchain/go-exchain/kurtosis-devnet/pkg/kurtosis/sources/interfaces"
"github.com/exchain/go-exchain/kurtosis-devnet/pkg/kurtosis/sources/jwt"
"github.com/exchain/go-exchain/kurtosis-devnet/pkg/kurtosis/sources/spec"
)
// enclaveSpecAdapter adapts spec.NewSpec to interfaces.EnclaveSpecifier.
type enclaveSpecAdapter struct{}

// EnclaveSpec parses an enclave spec from r.
func (a *enclaveSpecAdapter) EnclaveSpec(r io.Reader) (*spec.EnclaveSpec, error) {
	return spec.NewSpec().ExtractData(r)
}

var _ interfaces.EnclaveSpecifier = (*enclaveSpecAdapter)(nil)

// enclaveInspectAdapter adapts inspect.NewInspector to interfaces.EnclaveInspecter.
type enclaveInspectAdapter struct{}

// EnclaveInspect extracts inspection data from the named enclave.
func (a *enclaveInspectAdapter) EnclaveInspect(ctx context.Context, enclave string) (*inspect.InspectData, error) {
	return inspect.NewInspector(enclave).ExtractData(ctx)
}

var _ interfaces.EnclaveInspecter = (*enclaveInspectAdapter)(nil)

// enclaveDeployerAdapter adapts deployer.NewDeployer to interfaces.EnclaveObserver.
type enclaveDeployerAdapter struct{}

// EnclaveObserve extracts deployer data from the named enclave.
func (a *enclaveDeployerAdapter) EnclaveObserve(ctx context.Context, enclave string) (*deployer.DeployerData, error) {
	return deployer.NewDeployer(enclave).ExtractData(ctx)
}

var _ interfaces.EnclaveObserver = (*enclaveDeployerAdapter)(nil)

// enclaveJWTAdapter adapts jwt.NewExtractor to interfaces.JWTExtractor.
type enclaveJWTAdapter struct{}

// ExtractData extracts JWT data from the named enclave.
func (a *enclaveJWTAdapter) ExtractData(ctx context.Context, enclave string) (*jwt.Data, error) {
	return jwt.NewExtractor(enclave).ExtractData(ctx)
}

var _ interfaces.JWTExtractor = (*enclaveJWTAdapter)(nil)
package engine
import (
"fmt"
"os/exec"
"github.com/kurtosis-tech/kurtosis/api/golang/kurtosis_version"
)
// EngineManager handles running the Kurtosis engine
type EngineManager struct {
	kurtosisBinary string // path to (or name of) the kurtosis CLI binary
	version        string // engine version passed to `kurtosis engine start`
}
// Option configures an EngineManager
type Option func(*EngineManager)

// WithKurtosisBinary sets the path to the kurtosis binary
// (defaults to "kurtosis", resolved via PATH).
func WithKurtosisBinary(binary string) Option {
	return func(e *EngineManager) {
		e.kurtosisBinary = binary
	}
}

// WithVersion sets the engine version
// (defaults to the linked kurtosis library's version).
func WithVersion(version string) Option {
	return func(e *EngineManager) {
		e.version = version
	}
}
// NewEngineManager creates a new EngineManager with the given options,
// defaulting to the "kurtosis" binary on PATH and the version of the
// linked kurtosis library.
func NewEngineManager(opts ...Option) *EngineManager {
	manager := &EngineManager{
		kurtosisBinary: "kurtosis",
		version:        kurtosis_version.KurtosisVersion,
	}
	for _, apply := range opts {
		apply(manager)
	}
	return manager
}
// EnsureRunning starts the Kurtosis engine with the configured version by
// shelling out to `<binary> engine start --version <version>`. Command
// output is not captured; only the exit status is surfaced.
func (e *EngineManager) EnsureRunning() error {
	cmd := exec.Command(e.kurtosisBinary, "engine", "start", "--version", e.version)
	if err := cmd.Run(); err != nil {
		return fmt.Errorf("failed to start kurtosis engine: %w", err)
	}
	return nil
}
package fake
import (
"context"
"github.com/exchain/go-exchain/kurtosis-devnet/pkg/kurtosis/api/interfaces"
"github.com/kurtosis-tech/kurtosis/api/golang/core/lib/starlark_run_config"
)
// KurtosisContext implements interfaces.KurtosisContextInterface for testing
type KurtosisContext struct {
	EnclaveCtx *EnclaveContext // returned by both CreateEnclave and GetEnclave
	GetErr     error           // forced error for GetEnclave
	CreateErr  error           // forced error for CreateEnclave
}

// CreateEnclave returns the canned enclave context, or CreateErr if set.
func (f *KurtosisContext) CreateEnclave(ctx context.Context, name string) (interfaces.EnclaveContext, error) {
	if f.CreateErr != nil {
		return nil, f.CreateErr
	}
	return f.EnclaveCtx, nil
}

// GetEnclave returns the canned enclave context, or GetErr if set.
func (f *KurtosisContext) GetEnclave(ctx context.Context, name string) (interfaces.EnclaveContext, error) {
	if f.GetErr != nil {
		return nil, f.GetErr
	}
	return f.EnclaveCtx, nil
}
// EnclaveContext implements interfaces.EnclaveContext for testing
type EnclaveContext struct {
	RunErr    error                         // forced error for RunStarlarkPackage
	Responses []interfaces.StarlarkResponse // responses streamed on the returned channel
}

// RunStarlarkPackage returns RunErr if set; otherwise it streams the canned
// Responses on an unbuffered channel fed by a goroutine, closing the
// channel when all responses have been sent.
func (f *EnclaveContext) RunStarlarkPackage(ctx context.Context, pkg string, params *starlark_run_config.StarlarkRunConfig) (<-chan interfaces.StarlarkResponse, string, error) {
	if f.RunErr != nil {
		return nil, "", f.RunErr
	}
	// Create a channel and send all responses
	ch := make(chan interfaces.StarlarkResponse)
	go func() {
		defer close(ch)
		for _, resp := range f.Responses {
			ch <- resp
		}
	}()
	return ch, "", nil
}
// StarlarkResponse implements interfaces.StarlarkResponse for testing
type StarlarkResponse struct {
	Err          interfaces.StarlarkError
	ProgressMsg  []string
	Instruction  string
	IsSuccessful bool
	Warning      string
	Info         string
	Result       string
	HasResult    bool // tracks whether result was explicitly set
}

// GetError returns the canned error (may be nil).
func (f *StarlarkResponse) GetError() interfaces.StarlarkError {
	return f.Err
}

// GetProgressInfo wraps ProgressMsg, or returns nil when unset.
func (f *StarlarkResponse) GetProgressInfo() interfaces.ProgressInfo {
	if f.ProgressMsg != nil {
		return &ProgressInfo{Info: f.ProgressMsg}
	}
	return nil
}

// GetInstruction wraps Instruction, or returns nil when empty.
func (f *StarlarkResponse) GetInstruction() interfaces.Instruction {
	if f.Instruction != "" {
		return &Instruction{Desc: f.Instruction}
	}
	return nil
}

// GetRunFinishedEvent always returns an event carrying IsSuccessful.
func (f *StarlarkResponse) GetRunFinishedEvent() interfaces.RunFinishedEvent {
	return &RunFinishedEvent{IsSuccessful: f.IsSuccessful}
}

// GetWarning wraps Warning, or returns nil when empty.
func (f *StarlarkResponse) GetWarning() interfaces.Warning {
	if f.Warning != "" {
		return &Warning{Msg: f.Warning}
	}
	return nil
}

// GetInfo wraps Info, or returns nil when empty.
func (f *StarlarkResponse) GetInfo() interfaces.Info {
	if f.Info != "" {
		return &Info{Msg: f.Info}
	}
	return nil
}

// GetInstructionResult returns nil unless HasResult is set, which lets an
// explicitly-empty result be distinguished from "no result".
func (f *StarlarkResponse) GetInstructionResult() interfaces.InstructionResult {
	if !f.HasResult {
		return nil
	}
	return &InstructionResult{Result: f.Result}
}
// ProgressInfo implements ProgressInfo for testing
type ProgressInfo struct {
	Info []string
}

// GetCurrentStepInfo returns the canned step info.
func (f *ProgressInfo) GetCurrentStepInfo() []string {
	return f.Info
}

// Instruction implements Instruction for testing
type Instruction struct {
	Desc string
}

// GetDescription returns the canned description.
func (f *Instruction) GetDescription() string {
	return f.Desc
}

// StarlarkError implements StarlarkError for testing
type StarlarkError struct {
	InterpretationErr error
	ValidationErr     error
	ExecutionErr      error
}

// GetInterpretationError returns the canned interpretation error.
func (f *StarlarkError) GetInterpretationError() error {
	return f.InterpretationErr
}

// GetValidationError returns the canned validation error.
func (f *StarlarkError) GetValidationError() error {
	return f.ValidationErr
}

// GetExecutionError returns the canned execution error.
func (f *StarlarkError) GetExecutionError() error {
	return f.ExecutionErr
}

// RunFinishedEvent implements RunFinishedEvent for testing
type RunFinishedEvent struct {
	IsSuccessful bool
}

// GetIsRunSuccessful reports the canned success flag.
func (f *RunFinishedEvent) GetIsRunSuccessful() bool {
	return f.IsSuccessful
}

// Warning implements Warning for testing
type Warning struct {
	Msg string
}

// GetMessage returns the canned warning message.
func (f *Warning) GetMessage() string {
	return f.Msg
}

// Info implements Info for testing
type Info struct {
	Msg string
}

// GetMessage returns the canned info message.
func (f *Info) GetMessage() string {
	return f.Msg
}

// InstructionResult implements InstructionResult for testing
type InstructionResult struct {
	Result string
}

// GetSerializedInstructionResult returns the canned result string.
func (f *InstructionResult) GetSerializedInstructionResult() string {
	return f.Result
}
package interfaces
import (
"context"
"github.com/kurtosis-tech/kurtosis/api/golang/core/lib/starlark_run_config"
)
// Interfaces for Kurtosis SDK types to make testing easier

// StarlarkError exposes the three error categories of a Starlark run error.
type StarlarkError interface {
	GetInterpretationError() error
	GetValidationError() error
	GetExecutionError() error
}

// ProgressInfo reports the current step(s) of a run in progress.
type ProgressInfo interface {
	GetCurrentStepInfo() []string
}

// Instruction describes a single Starlark instruction being executed.
type Instruction interface {
	GetDescription() string
}

// RunFinishedEvent signals run completion and whether it succeeded.
type RunFinishedEvent interface {
	GetIsRunSuccessful() bool
}

// Warning carries a warning message emitted during a run.
type Warning interface {
	GetMessage() string
}

// Info carries an informational message emitted during a run.
type Info interface {
	GetMessage() string
}

// InstructionResult carries the serialized result of an instruction.
type InstructionResult interface {
	GetSerializedInstructionResult() string
}

// StarlarkResponse is one message from a streaming Starlark run; consumers
// probe the getters to determine which kind of message it is.
type StarlarkResponse interface {
	GetError() StarlarkError
	GetProgressInfo() ProgressInfo
	GetInstruction() Instruction
	GetRunFinishedEvent() RunFinishedEvent
	GetWarning() Warning
	GetInfo() Info
	GetInstructionResult() InstructionResult
}

// EnclaveContext runs a Starlark package inside an enclave, streaming responses.
type EnclaveContext interface {
	RunStarlarkPackage(context.Context, string, *starlark_run_config.StarlarkRunConfig) (<-chan StarlarkResponse, string, error)
}

// KurtosisContextInterface creates or fetches enclaves by name.
type KurtosisContextInterface interface {
	CreateEnclave(context.Context, string) (EnclaveContext, error)
	GetEnclave(context.Context, string) (EnclaveContext, error)
}
package run
import (
"context"
"fmt"
"github.com/exchain/go-exchain/kurtosis-devnet/pkg/kurtosis/api/interfaces"
"github.com/fatih/color"
)
// Color printers used by the handlers below: cyan for instructions, yellow
// for warnings, red for errors, blue for info.
var (
	printCyan   = color.New(color.FgCyan).SprintFunc()
	printYellow = color.New(color.FgYellow).SprintFunc()
	printRed    = color.New(color.FgRed).SprintFunc()
	printBlue   = color.New(color.FgBlue).SprintFunc()
)
// MessageHandler defines the interface for handling different types of messages
type MessageHandler interface {
	// Handle processes the message if applicable and returns:
	// - bool: whether the message was handled
	// - error: any error that occurred during handling
	Handle(context.Context, interfaces.StarlarkResponse) (bool, error)
}

// MessageHandlerFunc is a function type that implements MessageHandler
type MessageHandlerFunc func(context.Context, interfaces.StarlarkResponse) (bool, error)

// Handle satisfies MessageHandler by calling f itself.
func (f MessageHandlerFunc) Handle(ctx context.Context, resp interfaces.StarlarkResponse) (bool, error) {
	return f(ctx, resp)
}
// FirstMatchHandler returns a handler that tries each of the given handlers
// in order and stops at the first one that handles the message (or errors).
// An error is reported as handled; if nothing matches it returns (false, nil).
func FirstMatchHandler(handlers ...MessageHandler) MessageHandler {
	return MessageHandlerFunc(func(ctx context.Context, resp interfaces.StarlarkResponse) (bool, error) {
		for _, handler := range handlers {
			ok, err := handler.Handle(ctx, resp)
			if err != nil {
				return true, err
			}
			if ok {
				return true, nil
			}
		}
		return false, nil
	})
}
// AllHandlers returns a handler that applies all the given handlers in order.
// Unlike FirstMatchHandler it does not short-circuit on a match: every
// handler runs unless one errors (which stops the chain and is reported as
// handled). The combined result is true if any handler handled the message.
func AllHandlers(handlers ...MessageHandler) MessageHandler {
	return MessageHandlerFunc(func(ctx context.Context, resp interfaces.StarlarkResponse) (bool, error) {
		anyHandled := false
		for _, h := range handlers {
			handled, err := h.Handle(ctx, resp)
			if err != nil {
				return true, err
			}
			anyHandled = anyHandled || handled
		}
		return anyHandled, nil
	})
}
// defaultHandler is the default message handler that provides standard
// Kurtosis output. Order matters: the first matching handler wins, so
// progress messages are swallowed before anything else is considered.
var defaultHandler = FirstMatchHandler(
	MessageHandlerFunc(handleProgress),
	MessageHandlerFunc(handleInstruction),
	MessageHandlerFunc(handleWarning),
	MessageHandlerFunc(handleInfo),
	MessageHandlerFunc(handleResult),
	MessageHandlerFunc(handleError),
)
// handleProgress handles progress info messages: they are consumed without
// printing anything, mirroring the behavior of `kurtosis run`.
func handleProgress(ctx context.Context, resp interfaces.StarlarkResponse) (bool, error) {
	progress := resp.GetProgressInfo()
	if progress == nil {
		return false, nil
	}
	// Intentionally silent: progress noise is dropped.
	return true, nil
}
// handleInstruction handles instruction messages by printing the
// instruction description in cyan.
func handleInstruction(ctx context.Context, resp interfaces.StarlarkResponse) (bool, error) {
	if instruction := resp.GetInstruction(); instruction != nil {
		desc := instruction.GetDescription()
		fmt.Println(printCyan(desc))
		return true, nil
	}
	return false, nil
}
// handleWarning handles warning messages by printing them in yellow.
func handleWarning(ctx context.Context, resp interfaces.StarlarkResponse) (bool, error) {
	warning := resp.GetWarning()
	if warning == nil {
		return false, nil
	}
	fmt.Println(printYellow(warning.GetMessage()))
	return true, nil
}
// handleInfo handles info messages by printing them in blue.
func handleInfo(ctx context.Context, resp interfaces.StarlarkResponse) (bool, error) {
	if info := resp.GetInfo(); info != nil {
		fmt.Println(printBlue(info.GetMessage()))
		return true, nil
	}
	return false, nil
}
// handleResult handles instruction result messages. Empty results are
// consumed (handled) but print nothing.
func handleResult(ctx context.Context, resp interfaces.StarlarkResponse) (bool, error) {
	if result := resp.GetInstructionResult(); result != nil {
		if result.GetSerializedInstructionResult() != "" {
			fmt.Printf("%s\n\n", result.GetSerializedInstructionResult())
		}
		return true, nil
	}
	return false, nil
}
// handleError handles error messages: the first populated error category
// (interpretation, validation, execution) is returned as a red-colored Go
// error. A response carrying an error object with no populated category is
// still considered handled, with no error returned.
func handleError(ctx context.Context, resp interfaces.StarlarkResponse) (bool, error) {
	err := resp.GetError()
	if err == nil {
		return false, nil
	}
	// Build the message first, then colorize the whole string. This keeps
	// fmt.Errorf's format string constant (satisfying `go vet`'s printf
	// check) while producing byte-identical output to colorizing the
	// format string itself, as the previous version did.
	if interpretErr := err.GetInterpretationError(); interpretErr != nil {
		return true, fmt.Errorf("%s", printRed(fmt.Sprintf("interpretation error: %v", interpretErr)))
	}
	if validationErr := err.GetValidationError(); validationErr != nil {
		return true, fmt.Errorf("%s", printRed(fmt.Sprintf("validation error: %v", validationErr)))
	}
	if executionErr := err.GetExecutionError(); executionErr != nil {
		return true, fmt.Errorf("%s", printRed(fmt.Sprintf("execution error: %v", executionErr)))
	}
	return true, nil
}
// makeRunFinishedHandler creates a handler for run finished events,
// recording the run's success flag into *isSuccessful.
func makeRunFinishedHandler(isSuccessful *bool) MessageHandlerFunc {
	return func(ctx context.Context, resp interfaces.StarlarkResponse) (bool, error) {
		if event := resp.GetRunFinishedEvent(); event != nil {
			*isSuccessful = event.GetIsRunSuccessful()
			return true, nil
		}
		return false, nil
	}
}
package run
import (
"context"
"fmt"
"testing"
"github.com/exchain/go-exchain/kurtosis-devnet/pkg/kurtosis/api/fake"
"github.com/exchain/go-exchain/kurtosis-devnet/pkg/kurtosis/api/interfaces"
"github.com/stretchr/testify/assert"
)
// TestHandleProgress checks that handleProgress consumes progress messages
// and ignores everything else, never returning an error.
func TestHandleProgress(t *testing.T) {
	ctx := context.Background()
	tests := []struct {
		name     string
		response interfaces.StarlarkResponse
		want     bool
	}{
		{
			name: "handles progress message",
			response: &fake.StarlarkResponse{
				ProgressMsg: []string{"Step 1", "Step 2"},
			},
			want: true,
		},
		{
			name:     "ignores non-progress message",
			response: &fake.StarlarkResponse{},
			want:     false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			handled, err := handleProgress(ctx, tt.response)
			assert.NoError(t, err)
			assert.Equal(t, tt.want, handled)
		})
	}
}
// TestHandleInstruction checks that handleInstruction handles instruction
// messages and ignores everything else, never returning an error.
func TestHandleInstruction(t *testing.T) {
	ctx := context.Background()
	tests := []struct {
		name     string
		response interfaces.StarlarkResponse
		want     bool
	}{
		{
			name: "handles instruction message",
			response: &fake.StarlarkResponse{
				Instruction: "Execute command",
			},
			want: true,
		},
		{
			name:     "ignores non-instruction message",
			response: &fake.StarlarkResponse{},
			want:     false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			handled, err := handleInstruction(ctx, tt.response)
			assert.NoError(t, err)
			assert.Equal(t, tt.want, handled)
		})
	}
}
// TestHandleWarning checks that handleWarning handles warning messages and
// ignores everything else, never returning an error.
func TestHandleWarning(t *testing.T) {
	ctx := context.Background()
	tests := []struct {
		name     string
		response interfaces.StarlarkResponse
		want     bool
	}{
		{
			name: "handles warning message",
			response: &fake.StarlarkResponse{
				Warning: "Warning: deprecated feature",
			},
			want: true,
		},
		{
			name:     "ignores non-warning message",
			response: &fake.StarlarkResponse{},
			want:     false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			handled, err := handleWarning(ctx, tt.response)
			assert.NoError(t, err)
			assert.Equal(t, tt.want, handled)
		})
	}
}
// TestHandleInfo checks that handleInfo handles info messages and ignores
// everything else, never returning an error.
func TestHandleInfo(t *testing.T) {
	ctx := context.Background()
	tests := []struct {
		name     string
		response interfaces.StarlarkResponse
		want     bool
	}{
		{
			name: "handles info message",
			response: &fake.StarlarkResponse{
				Info: "System info",
			},
			want: true,
		},
		{
			name:     "ignores non-info message",
			response: &fake.StarlarkResponse{},
			want:     false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			handled, err := handleInfo(ctx, tt.response)
			assert.NoError(t, err)
			assert.Equal(t, tt.want, handled)
		})
	}
}
// TestHandleResult checks that handleResult handles result messages —
// including explicitly-empty results (HasResult set, Result "") — and
// ignores responses with no result.
func TestHandleResult(t *testing.T) {
	ctx := context.Background()
	tests := []struct {
		name     string
		response interfaces.StarlarkResponse
		want     bool
	}{
		{
			name: "handles result message",
			response: &fake.StarlarkResponse{
				Result:    "Operation completed",
				HasResult: true,
			},
			want: true,
		},
		{
			name: "handles empty result message",
			response: &fake.StarlarkResponse{
				Result:    "",
				HasResult: true,
			},
			want: true,
		},
		{
			name:     "ignores non-result message",
			response: &fake.StarlarkResponse{},
			want:     false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			handled, err := handleResult(ctx, tt.response)
			assert.NoError(t, err)
			assert.Equal(t, tt.want, handled)
		})
	}
}
// TestHandleError checks that handleError surfaces each error category
// (interpretation, validation, execution) as a returned error, and ignores
// responses that carry no error at all.
func TestHandleError(t *testing.T) {
	ctx := context.Background()
	testErr := fmt.Errorf("test error")
	tests := []struct {
		name      string
		response  interfaces.StarlarkResponse
		want      bool
		wantError bool
	}{
		{
			name: "handles interpretation error",
			response: &fake.StarlarkResponse{
				Err: &fake.StarlarkError{InterpretationErr: testErr},
			},
			want:      true,
			wantError: true,
		},
		{
			name: "handles validation error",
			response: &fake.StarlarkResponse{
				Err: &fake.StarlarkError{ValidationErr: testErr},
			},
			want:      true,
			wantError: true,
		},
		{
			name: "handles execution error",
			response: &fake.StarlarkResponse{
				Err: &fake.StarlarkError{ExecutionErr: testErr},
			},
			want:      true,
			wantError: true,
		},
		{
			name:     "ignores non-error message",
			response: &fake.StarlarkResponse{},
			want:     false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			handled, err := handleError(ctx, tt.response)
			if tt.wantError {
				assert.Error(t, err)
			} else {
				assert.NoError(t, err)
			}
			assert.Equal(t, tt.want, handled)
		})
	}
}
// TestFirstMatchHandler verifies the short-circuit semantics of
// FirstMatchHandler: first match wins, no match yields false, and a
// handler error is propagated while still reported as handled.
func TestFirstMatchHandler(t *testing.T) {
	ctx := context.Background()
	testErr := fmt.Errorf("test error")
	tests := []struct {
		name      string
		handlers  []MessageHandler
		response  interfaces.StarlarkResponse
		want      bool
		wantError bool
	}{
		{
			name: "first handler matches",
			handlers: []MessageHandler{
				MessageHandlerFunc(handleInfo),
				MessageHandlerFunc(handleWarning),
			},
			response: &fake.StarlarkResponse{
				Info: "test info",
			},
			want: true,
		},
		{
			name: "second handler matches",
			handlers: []MessageHandler{
				MessageHandlerFunc(handleInfo),
				MessageHandlerFunc(handleWarning),
			},
			response: &fake.StarlarkResponse{
				Warning: "test warning",
			},
			want: true,
		},
		{
			name: "no handlers match",
			handlers: []MessageHandler{
				MessageHandlerFunc(handleInfo),
				MessageHandlerFunc(handleWarning),
			},
			response: &fake.StarlarkResponse{
				Result: "test result", HasResult: true,
			},
			want: false,
		},
		{
			name: "handler returns error",
			handlers: []MessageHandler{
				MessageHandlerFunc(handleError),
			},
			response: &fake.StarlarkResponse{
				Err: &fake.StarlarkError{InterpretationErr: testErr},
			},
			want:      true,
			wantError: true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			handler := FirstMatchHandler(tt.handlers...)
			handled, err := handler.Handle(ctx, tt.response)
			if tt.wantError {
				assert.Error(t, err)
			} else {
				assert.NoError(t, err)
			}
			assert.Equal(t, tt.want, handled)
		})
	}
}
// TestAllHandlers verifies that AllHandlers runs every handler (no
// short-circuit on a match), reports true if any matched, and stops with
// the error when a handler fails.
func TestAllHandlers(t *testing.T) {
	ctx := context.Background()
	testErr := fmt.Errorf("test error")
	tests := []struct {
		name      string
		handlers  []MessageHandler
		response  interfaces.StarlarkResponse
		want      bool
		wantError bool
	}{
		{
			name: "multiple handlers match",
			handlers: []MessageHandler{
				MessageHandlerFunc(func(ctx context.Context, resp interfaces.StarlarkResponse) (bool, error) {
					return true, nil
				}),
				MessageHandlerFunc(func(ctx context.Context, resp interfaces.StarlarkResponse) (bool, error) {
					return true, nil
				}),
			},
			response: &fake.StarlarkResponse{},
			want:     true,
		},
		{
			name: "some handlers match",
			handlers: []MessageHandler{
				MessageHandlerFunc(handleInfo),
				MessageHandlerFunc(handleWarning),
			},
			response: &fake.StarlarkResponse{
				Info: "test info",
			},
			want: true,
		},
		{
			name: "no handlers match",
			handlers: []MessageHandler{
				MessageHandlerFunc(handleInfo),
				MessageHandlerFunc(handleWarning),
			},
			response: &fake.StarlarkResponse{
				Result: "test result", HasResult: true,
			},
			want: false,
		},
		{
			name: "handler returns error",
			handlers: []MessageHandler{
				MessageHandlerFunc(handleInfo),
				MessageHandlerFunc(handleError),
			},
			response: &fake.StarlarkResponse{
				Err: &fake.StarlarkError{InterpretationErr: testErr},
			},
			want:      true,
			wantError: true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			handler := AllHandlers(tt.handlers...)
			handled, err := handler.Handle(ctx, tt.response)
			if tt.wantError {
				assert.Error(t, err)
			} else {
				assert.NoError(t, err)
			}
			assert.Equal(t, tt.want, handled)
		})
	}
}
package run
import (
"context"
"errors"
"fmt"
"io"
"os"
"github.com/exchain/go-exchain/kurtosis-devnet/pkg/kurtosis/api/interfaces"
"github.com/exchain/go-exchain/kurtosis-devnet/pkg/kurtosis/api/wrappers"
"github.com/kurtosis-tech/kurtosis/api/golang/core/lib/starlark_run_config"
)
// KurtosisRunner runs Kurtosis packages inside an enclave and feeds the
// resulting Starlark output stream through a chain of message handlers.
type KurtosisRunner struct {
	dryRun      bool                                // when true, Run only prints what it would do
	enclave     string                              // name of the target enclave
	kurtosisCtx interfaces.KurtosisContextInterface // API used to get or create the enclave
	runHandlers []MessageHandler                    // user handlers applied to each stream message
}
type KurtosisRunnerOptions func(*KurtosisRunner)
// WithKurtosisRunnerDryRun toggles dry-run mode, in which the runner
// only prints what it would execute instead of running it.
func WithKurtosisRunnerDryRun(dryRun bool) KurtosisRunnerOptions {
	return func(runner *KurtosisRunner) {
		runner.dryRun = dryRun
	}
}
// WithKurtosisRunnerEnclave sets the name of the enclave the runner
// targets.
func WithKurtosisRunnerEnclave(enclave string) KurtosisRunnerOptions {
	return func(runner *KurtosisRunner) {
		runner.enclave = enclave
	}
}
// WithKurtosisRunnerKurtosisContext injects a Kurtosis context,
// overriding the default local-engine context (useful for tests).
func WithKurtosisRunnerKurtosisContext(kurtosisCtx interfaces.KurtosisContextInterface) KurtosisRunnerOptions {
	return func(runner *KurtosisRunner) {
		runner.kurtosisCtx = kurtosisCtx
	}
}
// WithKurtosisRunnerRunHandlers registers additional message handlers
// applied to each line of the Starlark output stream.
func WithKurtosisRunnerRunHandlers(runHandlers ...MessageHandler) KurtosisRunnerOptions {
	return func(runner *KurtosisRunner) {
		runner.runHandlers = runHandlers
	}
}
// NewKurtosisRunner builds a runner from the supplied options. If no
// Kurtosis context was injected, the default local-engine context is
// created; an error creating it is returned wrapped.
func NewKurtosisRunner(opts ...KurtosisRunnerOptions) (*KurtosisRunner, error) {
	runner := new(KurtosisRunner)
	for _, apply := range opts {
		apply(runner)
	}

	if runner.kurtosisCtx != nil {
		return runner, nil
	}

	defaultCtx, err := wrappers.GetDefaultKurtosisContext()
	if err != nil {
		return nil, fmt.Errorf("failed to create Kurtosis context: %w", err)
	}
	runner.kurtosisCtx = defaultCtx
	return runner, nil
}
// Run executes the named Kurtosis package inside the runner's enclave,
// with optional serialized parameters read from args (may be nil). In
// dry-run mode it only prints what would happen. The output stream is
// processed by the configured handlers plus the defaults; an error is
// returned if any handler fails or the run does not complete
// successfully.
func (r *KurtosisRunner) Run(ctx context.Context, packageName string, args io.Reader) error {
	if r.dryRun {
		fmt.Printf("Dry run mode enabled, would run kurtosis package %s in enclave %s\n",
			packageName, r.enclave)
		if args != nil {
			fmt.Println("\nWith arguments:")
			if _, copyErr := io.Copy(os.Stdout, args); copyErr != nil {
				return fmt.Errorf("failed to dump args: %w", copyErr)
			}
			fmt.Println()
		}
		return nil
	}

	// Reuse the enclave when it already exists; create it otherwise.
	enclaveCtx, err := r.kurtosisCtx.GetEnclave(ctx, r.enclave)
	switch {
	case err != nil:
		fmt.Printf("Creating a new enclave for Starlark to run inside...\n")
		enclaveCtx, err = r.kurtosisCtx.CreateEnclave(ctx, r.enclave)
		if err != nil {
			return fmt.Errorf("failed to create enclave: %w", err)
		}
		fmt.Printf("Enclave '%s' created successfully\n\n", r.enclave)
	default:
		fmt.Printf("Using existing enclave '%s'\n\n", r.enclave)
	}

	// Serialize run parameters from the args reader, if provided.
	var serialized string
	if args != nil {
		raw, readErr := io.ReadAll(args)
		if readErr != nil {
			return fmt.Errorf("failed to read args: %w", readErr)
		}
		serialized = string(raw)
	}

	cfg := &starlark_run_config.StarlarkRunConfig{
		SerializedParams: serialized,
	}
	stream, _, err := enclaveCtx.RunStarlarkPackage(ctx, packageName, cfg)
	if err != nil {
		return fmt.Errorf("failed to run Kurtosis package: %w", err)
	}

	// Success is recorded by a dedicated handler appended after the
	// user's handlers and the default one.
	var succeeded bool
	combined := AllHandlers(append(r.runHandlers, defaultHandler, makeRunFinishedHandler(&succeeded))...)

	for line := range stream {
		if _, handleErr := combined.Handle(ctx, line); handleErr != nil {
			return handleErr
		}
	}

	if !succeeded {
		return errors.New(printRed("kurtosis package execution failed"))
	}
	return nil
}
package run
import (
"context"
"fmt"
"testing"
"github.com/exchain/go-exchain/kurtosis-devnet/pkg/kurtosis/api/fake"
"github.com/exchain/go-exchain/kurtosis-devnet/pkg/kurtosis/api/interfaces"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestRunKurtosis drives KurtosisRunner.Run against a fake Kurtosis
// context, covering successful runs, execution errors, unsuccessful
// completion, package-run failures, and both enclave reuse and
// creation paths.
func TestRunKurtosis(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	testErr := fmt.Errorf("test error")

	cases := []struct {
		name        string
		responses   []fake.StarlarkResponse
		kurtosisErr error
		getErr      error
		wantErr     bool
	}{
		{
			name: "successful run with all message types",
			responses: []fake.StarlarkResponse{
				{ProgressMsg: []string{"Starting deployment..."}},
				{Info: "Preparing environment"},
				{Instruction: "Executing package"},
				{Warning: "Using default config"},
				{Result: "Service started", HasResult: true},
				{ProgressMsg: []string{"Deployment complete"}},
				{IsSuccessful: true},
			},
			wantErr: false,
		},
		{
			name: "run with error",
			responses: []fake.StarlarkResponse{
				{ProgressMsg: []string{"Starting deployment..."}},
				{Err: &fake.StarlarkError{ExecutionErr: testErr}},
			},
			wantErr: true,
		},
		{
			name: "run with unsuccessful completion",
			responses: []fake.StarlarkResponse{
				{ProgressMsg: []string{"Starting deployment..."}},
				{IsSuccessful: false},
			},
			wantErr: true,
		},
		{
			name:        "kurtosis error",
			kurtosisErr: fmt.Errorf("kurtosis failed"),
			wantErr:     true,
		},
		{
			name: "uses existing enclave",
			responses: []fake.StarlarkResponse{
				{ProgressMsg: []string{"Using existing enclave"}},
				{IsSuccessful: true},
			},
			getErr:  nil,
			wantErr: false,
		},
		{
			name: "creates new enclave when get fails",
			responses: []fake.StarlarkResponse{
				{ProgressMsg: []string{"Creating new enclave"}},
				{IsSuccessful: true},
			},
			getErr:  fmt.Errorf("enclave not found"),
			wantErr: false,
		},
	}

	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			// The fake context needs interface-typed responses.
			streamed := make([]interfaces.StarlarkResponse, len(tc.responses))
			for i := range tc.responses {
				streamed[i] = &tc.responses[i]
			}

			// Fake enclave context that replays the scripted responses.
			fakeCtx := &fake.KurtosisContext{
				EnclaveCtx: &fake.EnclaveContext{
					RunErr:    tc.kurtosisErr,
					Responses: streamed,
				},
				GetErr: tc.getErr,
			}

			runner, err := NewKurtosisRunner(
				WithKurtosisRunnerDryRun(false),
				WithKurtosisRunnerEnclave("test-enclave"),
				WithKurtosisRunnerKurtosisContext(fakeCtx),
			)
			require.NoError(t, err)

			err = runner.Run(ctx, "test-package", nil)
			if tc.wantErr {
				assert.Error(t, err)
			} else {
				assert.NoError(t, err)
			}
		})
	}
}
This diff is collapsed.
//go:build !testonly
// +build !testonly
package wrappers
import (
"fmt"
"github.com/exchain/go-exchain/kurtosis-devnet/pkg/kurtosis/api/interfaces"
"github.com/kurtosis-tech/kurtosis/api/golang/engine/lib/kurtosis_context"
)
// GetDefaultKurtosisContext connects to the local Kurtosis engine and
// wraps the resulting context in the interface expected by callers.
func GetDefaultKurtosisContext() (interfaces.KurtosisContextInterface, error) {
	inner, err := kurtosis_context.NewKurtosisContextFromLocalEngine()
	if err != nil {
		return nil, fmt.Errorf("failed to create Kurtosis context: %w", err)
	}
	return KurtosisContextWrapper{KurtosisContext: inner}, nil
}
//go:build testonly
// +build testonly
package wrappers
import (
"errors"
"github.com/exchain/go-exchain/kurtosis-devnet/pkg/kurtosis/api/interfaces"
)
// GetDefaultKurtosisContext always fails under the testonly build tag:
// tests are expected to inject a fake context rather than reach a
// local Kurtosis engine.
func GetDefaultKurtosisContext() (interfaces.KurtosisContextInterface, error) {
	return nil, errors.New("attempting to use local Kurtosis context in testonly mode")
}
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
package artifact
import (
"archive/tar"
"bytes"
"compress/gzip"
"context"
"io"
"path/filepath"
"github.com/kurtosis-tech/kurtosis/api/golang/engine/lib/kurtosis_context"
)
// EnclaveContextIface abstracts the EnclaveContext for testing
type EnclaveContextIface interface {
	// DownloadFilesArtifact fetches the named files artifact as a raw
	// byte slice (a gzip-compressed tar archive, per GetArtifact).
	DownloadFilesArtifact(ctx context.Context, name string) ([]byte, error)
}
// EnclaveFS provides read access to files artifacts stored in a
// Kurtosis enclave.
type EnclaveFS struct {
	enclaveCtx EnclaveContextIface // source of artifact downloads
}
// NewEnclaveFS connects to the local Kurtosis engine and returns an
// EnclaveFS bound to the named enclave.
func NewEnclaveFS(ctx context.Context, enclave string) (*EnclaveFS, error) {
	kCtx, err := kurtosis_context.NewKurtosisContextFromLocalEngine()
	if err != nil {
		return nil, err
	}

	eCtx, err := kCtx.GetEnclaveContext(ctx, enclave)
	if err != nil {
		return nil, err
	}

	return &EnclaveFS{enclaveCtx: eCtx}, nil
}
// NewEnclaveFSWithContext creates an EnclaveFS with a provided context (useful for testing)
func NewEnclaveFSWithContext(ctx EnclaveContextIface) *EnclaveFS {
	return &EnclaveFS{enclaveCtx: ctx}
}
// Artifact wraps the tar stream of a downloaded files artifact for
// sequential extraction via ExtractFiles.
type Artifact struct {
	reader *tar.Reader // positioned at the start of the archive
}
// GetArtifact downloads the named files artifact and wraps it in an
// Artifact for extraction. The downloaded bytes are expected to be a
// gzip-compressed tar archive.
func (fs *EnclaveFS) GetArtifact(ctx context.Context, name string) (*Artifact, error) {
	raw, err := fs.enclaveCtx.DownloadFilesArtifact(ctx, name)
	if err != nil {
		return nil, err
	}

	gz, err := gzip.NewReader(bytes.NewBuffer(raw))
	if err != nil {
		return nil, err
	}

	return &Artifact{reader: tar.NewReader(gz)}, nil
}
// ArtifactFileWriter pairs a path inside an artifact's tar archive
// with the writer its contents should be extracted into.
type ArtifactFileWriter struct {
	path   string    // archive path to match (cleaned before comparison)
	writer io.Writer // destination for the entry's bytes
}
// NewArtifactFileWriter binds an archive path to the destination
// writer that its contents should be copied into during extraction.
func NewArtifactFileWriter(path string, writer io.Writer) *ArtifactFileWriter {
	return &ArtifactFileWriter{path: path, writer: writer}
}
// ExtractFiles walks the artifact's tar entries in order, copying each
// entry whose cleaned path matches one of the given writers into that
// writer's destination. Entries with no matching writer are skipped.
// It returns the first read or write error encountered.
func (a *Artifact) ExtractFiles(writers ...*ArtifactFileWriter) error {
	// Index destinations by cleaned path for O(1) lookup per entry.
	paths := make(map[string]io.Writer)
	for _, writer := range writers {
		canonicalPath := filepath.Clean(writer.path)
		paths[canonicalPath] = writer.writer
	}
	for {
		header, err := a.reader.Next()
		if err == io.EOF {
			break
		}
		// BUG FIX: a non-EOF error was previously ignored, leaving
		// header nil and panicking on header.Name below; surface it.
		if err != nil {
			return err
		}
		headerPath := filepath.Clean(header.Name)
		writer, ok := paths[headerPath]
		if !ok {
			continue
		}
		if _, err := io.Copy(writer, a.reader); err != nil {
			return err
		}
	}
	return nil
}
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
name: github.com/ethereum-optimism/optimism/kurtosis-devnet/tests
description: |-
Kurtosis package for running tests within the enclave
replace: {}
This diff is collapsed.
{{- $context := or . (dict)}}
---
{{ include "templates/devnet.yaml" $context }}
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment