diff --git a/.changeset/config.json b/.changeset/config.json index 64f132b826..b5d7d4bb33 100644 --- a/.changeset/config.json +++ b/.changeset/config.json @@ -11,6 +11,7 @@ "ensrainbow", "ensapi", "fallback-ensapi", + "ensrainbowbeam", "enssdk", "enscli", "enskit", diff --git a/.changeset/ensapi-omnigraph-labels.md b/.changeset/ensapi-omnigraph-labels.md new file mode 100644 index 0000000000..799c6cba6d --- /dev/null +++ b/.changeset/ensapi-omnigraph-labels.md @@ -0,0 +1,5 @@ +--- +"ensapi": minor +--- + +Omnigraph **`Query.labels`** improvements: add a **`LabelHash`** GraphQL scalar (`0x` + 64 lowercase hex, parsed via `parseLabelHash`), rename the input to **`LabelsByLabelHashesInput`** with field **`labelHashes`**, enforce stricter parsing/validation through the scalar layer, normalize mixed-case hex at parse time, cap batch size to **`100`** distinct LabelHashes per request (after deduplication) for a round-number limit aligned with the `inArray` workload, and keep development error masking aligned with Yoga defaults while ensuring intentional `GraphQLError`s still surface useful client messages where applicable. diff --git a/.changeset/ensrainbow-sdk-beam-client.md b/.changeset/ensrainbow-sdk-beam-client.md new file mode 100644 index 0000000000..adcabd436a --- /dev/null +++ b/.changeset/ensrainbow-sdk-beam-client.md @@ -0,0 +1,5 @@ +--- +"@ensnode/ensrainbow-sdk": minor +--- + +Add a light **EnsRainbowBeam** HTTP client (`EnsRainbowBeamClient`): `health()` and `discover()` against EnsRainbowBeam, client-side validation aligned with the server, `EnsRainbowBeamHttpError` for non-2xx responses with optional `{ message, details }` parsing, and subpath export `@ensnode/ensrainbow-sdk/ensrainbowbeam-client`. 
diff --git a/.changeset/ensrainbowbeam-app.md b/.changeset/ensrainbowbeam-app.md new file mode 100644 index 0000000000..77b9b508f1 --- /dev/null +++ b/.changeset/ensrainbowbeam-app.md @@ -0,0 +1,5 @@ +--- +"ensrainbowbeam": minor +--- + +Add **`EnsRainbowBeam`** (`apps/ensrainbowbeam`) exposing **`POST /api/discover`**, classifies each submitted label literal against ENSNode via **`labels(by: { labelHashes })`** (with client-side chunking aligned to ENSApi batch limits), emits structured JSON Lines to stdout for future sinks, mirrors other apps’ Dockerfile + Compose service patterns (`docker/services/ensrainbowbeam.yml`), and includes MIT **`LICENSE`** in the app directory ([issue \#2003](https://github.com/namehash/ensnode/issues/2003)). diff --git a/.changeset/enssdk-omnigraph-codegen.md b/.changeset/enssdk-omnigraph-codegen.md new file mode 100644 index 0000000000..24107964d1 --- /dev/null +++ b/.changeset/enssdk-omnigraph-codegen.md @@ -0,0 +1,5 @@ +--- +"enssdk": minor +--- + +Regenerate `enssdk/omnigraph` artifacts for the Omnigraph **`LabelHash`** scalar, mapped in `OmnigraphScalars` for typed **`graphql`** documents. 
diff --git a/.github/workflows/deploy_ensnode_blue_green.yml b/.github/workflows/deploy_ensnode_blue_green.yml index 36b8d3e9da..b42d37a742 100644 --- a/.github/workflows/deploy_ensnode_blue_green.yml +++ b/.github/workflows/deploy_ensnode_blue_green.yml @@ -28,6 +28,7 @@ jobs: ENSINDEXER_DOCKER_IMAGE: "ghcr.io/namehash/ensnode/ensindexer:${{ inputs.tag }}" ENSAPI_DOCKER_IMAGE: "ghcr.io/namehash/ensnode/ensapi:${{ inputs.tag }}" ENSRAINBOW_DOCKER_IMAGE: "ghcr.io/namehash/ensnode/ensrainbow:${{ inputs.tag }}" + ENSRAINBOWBEAM_DOCKER_IMAGE: "ghcr.io/namehash/ensnode/ensrainbowbeam:${{ inputs.tag }}" ENSADMIN_DOCKER_IMAGE: "ghcr.io/namehash/ensnode/ensadmin:${{ inputs.tag }}" RAILWAY_TOKEN: ${{ secrets.RAILWAY_TOKEN }} RAILWAY_PROJECT_ID: ${{ secrets.RAILWAY_PROJECT_ID }} @@ -45,6 +46,7 @@ jobs: docker manifest inspect ${{ env.ENSINDEXER_DOCKER_IMAGE }} || { echo "Given docker image does not exist: ${{ env.ENSINDEXER_DOCKER_IMAGE }}"; exit 1; } docker manifest inspect ${{ env.ENSAPI_DOCKER_IMAGE }} || { echo "Given docker image does not exist: ${{ env.ENSAPI_DOCKER_IMAGE }}"; exit 1; } docker manifest inspect ${{ env.ENSRAINBOW_DOCKER_IMAGE }} || { echo "Given docker image does not exist: ${{ env.ENSRAINBOW_DOCKER_IMAGE }}"; exit 1; } + docker manifest inspect ${{ env.ENSRAINBOWBEAM_DOCKER_IMAGE }} || { echo "Given docker image does not exist: ${{ env.ENSRAINBOWBEAM_DOCKER_IMAGE }}"; exit 1; } docker manifest inspect ${{ env.ENSADMIN_DOCKER_IMAGE }} || { echo "Given docker image does not exist: ${{ env.ENSADMIN_DOCKER_IMAGE }}"; exit 1; } - name: Print switch target @@ -76,6 +78,8 @@ jobs: echo "ENSRAINBOW_SVC_ID="${{ secrets.GREEN_ENSRAINBOW_SVC_ID }} >> "$GITHUB_ENV" #ENSRAINBOW SEARCHLIGHT echo "ENSRAINBOW_SEARCHLIGHT_SVC_ID="${{ secrets.GREEN_ENSRAINBOW_SEARCHLIGHT_SVC_ID }} >> "$GITHUB_ENV" + #ENSRAINBOWBEAM + echo "ENSRAINBOWBEAM_SVC_ID="${{ secrets.GREEN_ENSRAINBOWBEAM_SVC_ID }} >> "$GITHUB_ENV" #ENSADMIN echo "ENSADMIN_SVC_ID="${{ secrets.GREEN_ENSADMIN_SVC_ID 
}} >> "$GITHUB_ENV" echo "SLACK_TITLE=':large_green_circle: GREEN environment is now having new ENSNode version - '"${{ env.TAG }} >> "$GITHUB_ENV" @@ -102,6 +106,8 @@ jobs: echo "ENSRAINBOW_SVC_ID="${{ secrets.BLUE_ENSRAINBOW_SVC_ID }} >> "$GITHUB_ENV" #ENSRAINBOW SEARCHLIGHT echo "ENSRAINBOW_SEARCHLIGHT_SVC_ID="${{ secrets.BLUE_ENSRAINBOW_SEARCHLIGHT_SVC_ID }} >> "$GITHUB_ENV" + #ENSRAINBOWBEAM + echo "ENSRAINBOWBEAM_SVC_ID="${{ secrets.BLUE_ENSRAINBOWBEAM_SVC_ID }} >> "$GITHUB_ENV" #ENSADMIN echo "ENSADMIN_SVC_ID="${{ secrets.BLUE_ENSADMIN_SVC_ID }} >> "$GITHUB_ENV" echo "SLACK_TITLE=':large_blue_circle: BLUE environment is now having new ENSNode version - '"${{ env.TAG }} >> "$GITHUB_ENV" @@ -155,6 +161,8 @@ jobs: update_service_image ${RAILWAY_ENVIRONMENT_ID} ${ENSRAINBOW_SVC_ID} ${{ env.ENSRAINBOW_DOCKER_IMAGE }} #ENSRAINBOW SEARCHLIGHT update_service_image ${RAILWAY_ENVIRONMENT_ID} ${ENSRAINBOW_SEARCHLIGHT_SVC_ID} ${{ env.ENSRAINBOW_DOCKER_IMAGE }} + #ENSRAINBOWBEAM + update_service_image ${RAILWAY_ENVIRONMENT_ID} ${ENSRAINBOWBEAM_SVC_ID} ${{ env.ENSRAINBOWBEAM_DOCKER_IMAGE }} #ENSADMIN update_service_image ${RAILWAY_ENVIRONMENT_ID} ${ENSADMIN_SVC_ID} ${{ env.ENSADMIN_DOCKER_IMAGE }} @@ -216,6 +224,8 @@ jobs: redeploy_service ${RAILWAY_ENVIRONMENT_ID} ${ENSRAINBOW_SVC_ID} #ENSRAINBOW SEARCHLIGHT redeploy_service ${RAILWAY_ENVIRONMENT_ID} ${ENSRAINBOW_SEARCHLIGHT_SVC_ID} + #ENSRAINBOWBEAM + redeploy_service ${RAILWAY_ENVIRONMENT_ID} ${ENSRAINBOWBEAM_SVC_ID} #ENSADMIN redeploy_service ${RAILWAY_ENVIRONMENT_ID} ${ENSADMIN_SVC_ID} diff --git a/.github/workflows/deploy_ensnode_yellow.yml b/.github/workflows/deploy_ensnode_yellow.yml index baf448d1a6..93231f72b3 100644 --- a/.github/workflows/deploy_ensnode_yellow.yml +++ b/.github/workflows/deploy_ensnode_yellow.yml @@ -20,6 +20,7 @@ jobs: ENSINDEXER_DOCKER_IMAGE: "ghcr.io/namehash/ensnode/ensindexer:${{ inputs.tag }}" ENSAPI_DOCKER_IMAGE: "ghcr.io/namehash/ensnode/ensapi:${{ inputs.tag }}" 
ENSRAINBOW_DOCKER_IMAGE: "ghcr.io/namehash/ensnode/ensrainbow:${{ inputs.tag }}" + ENSRAINBOWBEAM_DOCKER_IMAGE: "ghcr.io/namehash/ensnode/ensrainbowbeam:${{ inputs.tag }}" ENSADMIN_DOCKER_IMAGE: "ghcr.io/namehash/ensnode/ensadmin:${{ inputs.tag }}" # Terraform related envs @@ -55,6 +56,7 @@ jobs: docker manifest inspect ${{ env.ENSINDEXER_DOCKER_IMAGE }} || { echo "Given docker image does not exist: ${{ env.ENSINDEXER_DOCKER_IMAGE }}"; exit 1; } docker manifest inspect ${{ env.ENSAPI_DOCKER_IMAGE }} || { echo "Given docker image does not exist: ${{ env.ENSAPI_DOCKER_IMAGE }}"; exit 1; } docker manifest inspect ${{ env.ENSRAINBOW_DOCKER_IMAGE }} || { echo "Given docker image does not exist: ${{ env.ENSRAINBOW_DOCKER_IMAGE }}"; exit 1; } + docker manifest inspect ${{ env.ENSRAINBOWBEAM_DOCKER_IMAGE }} || { echo "Given docker image does not exist: ${{ env.ENSRAINBOWBEAM_DOCKER_IMAGE }}"; exit 1; } docker manifest inspect ${{ env.ENSADMIN_DOCKER_IMAGE }} || { echo "Given docker image does not exist: ${{ env.ENSADMIN_DOCKER_IMAGE }}"; exit 1; } - name: Setup Terraform diff --git a/.github/workflows/deploy_switch_ensnode_environment.yml b/.github/workflows/deploy_switch_ensnode_environment.yml index 2b5255beb8..f080f6b194 100644 --- a/.github/workflows/deploy_switch_ensnode_environment.yml +++ b/.github/workflows/deploy_switch_ensnode_environment.yml @@ -92,6 +92,10 @@ jobs: # ENSRAINBOW SEARCHLIGHT redis-cli -u $REDIS_URL SET traefik/http/routers/ensrainbow-searchlight-api-router/service "${TARGET_ENVIRONMENT}-ensrainbow-searchlight-api" + # ENSRAINBOWBEAM + # NOTE: Router/service names must match Traefik IaC (likely in namehash-tf-iac-live). 
+ redis-cli -u $REDIS_URL SET traefik/http/routers/ensrainbowbeam-api-router/service "${TARGET_ENVIRONMENT}-ensrainbowbeam-api" + - name: Promote ENSAdmin Vercel Deployment uses: ./.github/actions/promote_vercel_deployment with: diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 7822968ab5..6a4de21ece 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -13,7 +13,7 @@ # # Published artifacts: # - NPM packages: @ensnode/* packages published to npm registry with @latest tag -# - Docker images: ensindexer, ensadmin, ensapi, ensrainbow published to ghcr.io with @latest tag +# - Docker images: ensindexer, ensadmin, ensapi, ensrainbow, ensrainbowbeam published to ghcr.io with @latest tag # - GitHub Release: Created with version tag (e.g., v1.2.3) and autogenerated release notes # # Version management: @@ -88,6 +88,7 @@ jobs: or .name == "ensadmin" or .name == "ensapi" or .name == "ensrainbow" + or .name == "ensrainbowbeam" )) - name: Filter Published Packages For NPM Packages diff --git a/.github/workflows/release_preview.yml b/.github/workflows/release_preview.yml index eca8ceb0fe..2f68b5a174 100644 --- a/.github/workflows/release_preview.yml +++ b/.github/workflows/release_preview.yml @@ -230,7 +230,7 @@ jobs: strategy: fail-fast: false matrix: - app: [ensindexer, ensadmin, ensapi, ensrainbow] + app: [ensindexer, ensadmin, ensapi, ensrainbow, ensrainbowbeam] steps: - name: Checkout repository uses: actions/checkout@v6 @@ -342,6 +342,7 @@ jobs: docker pull ghcr.io/namehash/ensnode/ensadmin:${{ needs.validate-and-prepare.outputs.docker-tag-base }}-${{ needs.validate-and-prepare.outputs.commit-sha }} docker pull ghcr.io/namehash/ensnode/ensapi:${{ needs.validate-and-prepare.outputs.docker-tag-base }}-${{ needs.validate-and-prepare.outputs.commit-sha }} docker pull ghcr.io/namehash/ensnode/ensrainbow:${{ needs.validate-and-prepare.outputs.docker-tag-base }}-${{ needs.validate-and-prepare.outputs.commit-sha }} + 
docker pull ghcr.io/namehash/ensnode/ensrainbowbeam:${{ needs.validate-and-prepare.outputs.docker-tag-base }}-${{ needs.validate-and-prepare.outputs.commit-sha }} \`\`\` " diff --git a/.github/workflows/release_snapshot.yml b/.github/workflows/release_snapshot.yml index 44040a97eb..fe987176c1 100644 --- a/.github/workflows/release_snapshot.yml +++ b/.github/workflows/release_snapshot.yml @@ -12,7 +12,7 @@ # # Published artifacts: # - NPM packages: All @ensnode/* packages published with @next tag -# - Docker images: ensindexer, ensadmin, ensapi, ensrainbow published with @next tag +# - Docker images: ensindexer, ensadmin, ensapi, ensrainbow, ensrainbowbeam published with @next tag # - No GitHub releases or tags are created # # Version behavior: @@ -96,7 +96,7 @@ jobs: strategy: fail-fast: false matrix: - app: [ensindexer, ensadmin, ensrainbow, ensapi] + app: [ensindexer, ensadmin, ensrainbow, ensapi, ensrainbowbeam] steps: - name: Checkout repository uses: actions/checkout@v6 diff --git a/apps/ensapi/src/omnigraph-api/builder.ts b/apps/ensapi/src/omnigraph-api/builder.ts index eafbae8696..7cec8b79ea 100644 --- a/apps/ensapi/src/omnigraph-api/builder.ts +++ b/apps/ensapi/src/omnigraph-api/builder.ts @@ -11,6 +11,7 @@ import type { Hex, InterpretedLabel, InterpretedName, + LabelHash, Node, NormalizedAddress, PermissionsId, @@ -60,6 +61,7 @@ export type BuilderScalars = { BigInt: { Input: bigint; Output: bigint }; Address: { Input: NormalizedAddress; Output: NormalizedAddress }; Hex: { Input: Hex; Output: Hex }; + LabelHash: { Input: LabelHash; Output: LabelHash }; ChainId: { Input: ChainId; Output: ChainId }; CoinType: { Input: CoinType; Output: CoinType }; Node: { Input: Node; Output: Node }; diff --git a/apps/ensapi/src/omnigraph-api/schema/label.integration.test.ts b/apps/ensapi/src/omnigraph-api/schema/label.integration.test.ts new file mode 100644 index 0000000000..3384eb984c --- /dev/null +++ b/apps/ensapi/src/omnigraph-api/schema/label.integration.test.ts @@ 
-0,0 +1,142 @@ +import { + asInterpretedLabel, + encodeLabelHash, + type InterpretedLabel, + type LabelHash, + labelhashInterpretedLabel, + parseLabelHash, +} from "enssdk"; +import { describe, expect, it } from "vitest"; + +import { LABELS_BY_LABELHASH_MAX } from "@/omnigraph-api/schema/label"; +import { request } from "@/test/integration/graphql-utils"; +import { gql } from "@/test/integration/omnigraph-api-client"; + +type LabelsByLabelHashResult = { + labels: Array<{ hash: LabelHash; interpreted: InterpretedLabel }>; +}; + +const LabelsByLabelHash = gql` + query LabelsByLabelHash($labelHashes: [LabelHash!]!) { + labels(by: { labelHashes: $labelHashes }) { + hash + interpreted + } + } +`; + +// 'eth' is always seeded in the devnet fixture as a healed label +const ETH_LABEL_HASH: LabelHash = labelhashInterpretedLabel(asInterpretedLabel("eth")); + +// a LabelHash that should not exist in the index (deterministic dummy bytes) +const ABSENT_LABEL_HASH = parseLabelHash(`0x${"ff".repeat(32)}`); + +describe("Query.labels", () => { + it("returns a healed label entry for a known LabelHash", async () => { + await expect( + request(LabelsByLabelHash, { labelHashes: [ETH_LABEL_HASH] }), + ).resolves.toMatchObject({ + labels: [{ hash: ETH_LABEL_HASH, interpreted: "eth" }], + }); + }); + + it("accepts non-normalized (mixed-case hex digits) LabelHash variables and resolves matches", async () => { + // Lowercase `0x` prefix only; uppercase `0X` is rejected (see enssdk `parseLabelHash`). + const mixedCaseVariable = `0x${ETH_LABEL_HASH.slice(2) + .split("") + .map((c, i) => (i % 2 === 0 ? 
c.toUpperCase() : c)) + .join("")}` as LabelHash; + expect(parseLabelHash(mixedCaseVariable)).toBe(ETH_LABEL_HASH); + + await expect( + request(LabelsByLabelHash, { + labelHashes: [mixedCaseVariable], + }), + ).resolves.toMatchObject({ + labels: [{ hash: ETH_LABEL_HASH, interpreted: "eth" }], + }); + }); + + it("rejects uppercase 0X hex prefix", async () => { + const badPrefix = `0X${ETH_LABEL_HASH.slice(2)}`; + await expect(request(LabelsByLabelHash, { labelHashes: [badPrefix] })).rejects.toThrow( + /Invalid labelHash/i, + ); + }); + + it("omits LabelHashes that are not present in the index", async () => { + await expect( + request(LabelsByLabelHash, { labelHashes: [ABSENT_LABEL_HASH] }), + ).resolves.toEqual({ labels: [] }); + }); + + it("returns only the present labels when input mixes present and absent LabelHashes", async () => { + await expect( + request(LabelsByLabelHash, { + labelHashes: [ETH_LABEL_HASH, ABSENT_LABEL_HASH], + }), + ).resolves.toMatchObject({ + labels: [{ hash: ETH_LABEL_HASH }], + }); + }); + + it("dedupes repeated input LabelHashes", async () => { + await expect( + request(LabelsByLabelHash, { + labelHashes: [ETH_LABEL_HASH, ETH_LABEL_HASH, ETH_LABEL_HASH], + }), + ).resolves.toMatchObject({ + labels: [{ hash: ETH_LABEL_HASH }], + }); + }); + + it("returns an empty list when input is empty", async () => { + await expect(request(LabelsByLabelHash, { labelHashes: [] })).resolves.toEqual({ labels: [] }); + }); + + it("classifies returned labels: 'eth' is healed (interpreted !== encodeLabelHash(hash))", async () => { + const { labels } = await request(LabelsByLabelHash, { + labelHashes: [ETH_LABEL_HASH], + }); + + expect(labels).toHaveLength(1); + expect(labels[0].interpreted).not.toEqual(encodeLabelHash(ETH_LABEL_HASH)); + }); + + it("rejects junk strings that cannot be parsed as LabelHashes", async () => { + await expect( + request(LabelsByLabelHash, { + labelHashes: ["not-even-hex"], + }), + ).rejects.toThrow(/Invalid labelHash/i); + }); + 
+ it("rejects hex values that are not exactly 32 bytes", async () => { + await expect( + request(LabelsByLabelHash, { + labelHashes: ["0x00"], + }), + ).rejects.toThrow(/Invalid labelHash/i); + }); + + it("rejects requests over the maximum allowed distinct LabelHash count", async () => { + const labelHashes: LabelHash[] = []; + for (let i = 0; i <= LABELS_BY_LABELHASH_MAX; i++) { + labelHashes.push(parseLabelHash(`0x${i.toString(16).padStart(64, "0")}`)); + } + + await expect(request(LabelsByLabelHash, { labelHashes })).rejects.toThrow( + /Too many distinct LabelHashes/i, + ); + }); + + it("allows input with duplicate LabelHashes when the distinct count is within the max", async () => { + await expect( + request(LabelsByLabelHash, { + labelHashes: [ETH_LABEL_HASH, ETH_LABEL_HASH, ETH_LABEL_HASH], + }), + ).resolves.toMatchObject({ + labels: [{ hash: ETH_LABEL_HASH, interpreted: "eth" }], + }); + }); +}); diff --git a/apps/ensapi/src/omnigraph-api/schema/label.ts b/apps/ensapi/src/omnigraph-api/schema/label.ts index 2ea3fec004..d57cc8621a 100644 --- a/apps/ensapi/src/omnigraph-api/schema/label.ts +++ b/apps/ensapi/src/omnigraph-api/schema/label.ts @@ -1,3 +1,5 @@ +import { OMNIGRAPH_LABELS_BY_LABELHASH_MAX } from "enssdk"; + import type { ensIndexerSchema } from "@/lib/ensdb/singleton"; import { builder } from "@/omnigraph-api/builder"; @@ -11,7 +13,7 @@ LabelRef.implement({ hash: t.field({ description: "The Label's LabelHash\n(@see https://ensnode.io/docs/reference/terminology#labels-labelhashes-labelhash-function)", - type: "Hex", + type: "LabelHash", nullable: false, resolve: (parent) => parent.labelHash, }), @@ -28,3 +30,26 @@ LabelRef.implement({ }), }), }); + +////////// +// Inputs +////////// + +/** + * Maximum number of LabelHashes accepted per `Query.labels` request. + * + * Caps the resolver's `inArray` query so a single GraphQL request cannot enumerate + * the entire `label` table. 
+ */ +export const LABELS_BY_LABELHASH_MAX = OMNIGRAPH_LABELS_BY_LABELHASH_MAX; + +export const LabelsByLabelHashesInput = builder.inputType("LabelsByLabelHashesInput", { + description: "Look up Labels by a batch of LabelHashes.", + fields: (t) => ({ + labelHashes: t.field({ + type: ["LabelHash"], + required: true, + description: `LabelHashes to look up. After deduplication, at most ${LABELS_BY_LABELHASH_MAX} distinct LabelHashes per request (each normalized to lowercase at parse time). LabelHashes absent from the index are omitted from the result.`, + }), + }), +}); diff --git a/apps/ensapi/src/omnigraph-api/schema/query.ts b/apps/ensapi/src/omnigraph-api/schema/query.ts index 4e5df509f7..01fdc852df 100644 --- a/apps/ensapi/src/omnigraph-api/schema/query.ts +++ b/apps/ensapi/src/omnigraph-api/schema/query.ts @@ -1,7 +1,9 @@ import config from "@/config"; import { type ResolveCursorConnectionArgs, resolveCursorConnection } from "@pothos/plugin-relay"; +import { inArray } from "drizzle-orm"; import { makeConcreteRegistryId, makePermissionsId, makeResolverId } from "enssdk"; +import { createGraphQLError } from "graphql-yoga"; import { getRootRegistryId } from "@ensnode/ensnode-sdk"; @@ -25,6 +27,11 @@ import { DomainsOrderInput, DomainsWhereInput, } from "@/omnigraph-api/schema/domain"; +import { + LABELS_BY_LABELHASH_MAX, + LabelRef, + LabelsByLabelHashesInput, +} from "@/omnigraph-api/schema/label"; import { PermissionsIdInput, PermissionsRef } from "@/omnigraph-api/schema/permissions"; import { RegistrationInterfaceRef } from "@/omnigraph-api/schema/registration"; import { RegistryIdInput, RegistryInterfaceRef } from "@/omnigraph-api/schema/registry"; @@ -140,6 +147,41 @@ builder.queryType({ }, }), + ///////////////////////// + // Find Labels by Hashes + ///////////////////////// + labels: t.field({ + description: + "Look up Labels in the index by a batch of LabelHashes. 
" + + "Each returned Label exposes its `hash` and `interpreted` representation, where " + + "`interpreted` is the Encoded LabelHash for unhealed/unknown labels and a normalized " + + "literal for healed labels. LabelHashes that are not present in the index are simply " + + "omitted from the result.", + type: [LabelRef], + nullable: false, + args: { by: t.arg({ type: LabelsByLabelHashesInput, required: true }) }, + resolve: async (_parent, { by }) => { + if (by.labelHashes.length === 0) return []; + + const dedupedHashes = Array.from(new Set(by.labelHashes)); + + if (dedupedHashes.length > LABELS_BY_LABELHASH_MAX) { + // Use `createGraphQLError` so the client-facing validation message survives Yoga's + // default `maskError`, which (correctly) hides plain `Error` instances as + // "Unexpected error.". + throw createGraphQLError( + `Too many distinct LabelHashes: received ${dedupedHashes.length}, max ${LABELS_BY_LABELHASH_MAX}.`, + { extensions: { code: "BAD_USER_INPUT" } }, + ); + } + + return ensDb + .select() + .from(ensIndexerSchema.label) + .where(inArray(ensIndexerSchema.label.labelHash, dedupedHashes)); + }, + }), + ///////////////////////////////////// // Get Account by Id or Address ///////////////////////////////////// diff --git a/apps/ensapi/src/omnigraph-api/schema/scalars.ts b/apps/ensapi/src/omnigraph-api/schema/scalars.ts index 9ccbbf235a..4fcedd192d 100644 --- a/apps/ensapi/src/omnigraph-api/schema/scalars.ts +++ b/apps/ensapi/src/omnigraph-api/schema/scalars.ts @@ -7,18 +7,21 @@ import { type Hex, isInterpretedLabel, isInterpretedName, + type LabelHash, type Name, type Node, type NormalizedAddress, type PermissionsId, type PermissionsResourceId, type PermissionsUserId, + parseLabelHash, type RegistrationId, type RegistryId, type RenewalId, type ResolverId, type ResolverRecordsId, } from "enssdk"; +import { createGraphQLError } from "graphql-yoga"; import { isHex, size } from "viem"; import { z } from "zod/v4"; @@ -61,6 +64,20 @@ 
builder.scalarType("Hex", { .parse(value), }); +builder.scalarType("LabelHash", { + description: + "LabelHash represents enssdk#LabelHash: a 32-byte (64 hex digit) value, `0x`-prefixed and lowercased.", + serialize: (value: LabelHash) => value, + parseValue: (value) => { + try { + return parseLabelHash(z.coerce.string().parse(value)); + } catch (err) { + const message = err instanceof Error ? err.message : String(err); + throw createGraphQLError(message, { extensions: { code: "BAD_USER_INPUT" } }); + } + }, +}); + builder.scalarType("ChainId", { description: "ChainId represents a enssdk#ChainId.", serialize: (value: ChainId) => value, diff --git a/apps/ensapi/src/omnigraph-api/yoga.ts b/apps/ensapi/src/omnigraph-api/yoga.ts index 528e80546d..ac79cbb1f4 100644 --- a/apps/ensapi/src/omnigraph-api/yoga.ts +++ b/apps/ensapi/src/omnigraph-api/yoga.ts @@ -2,7 +2,7 @@ // import { maxDepthPlugin } from "@escape.tech/graphql-armor-max-depth"; // import { maxTokensPlugin } from "@escape.tech/graphql-armor-max-tokens"; -import { createYoga } from "graphql-yoga"; +import { createYoga, maskError } from "graphql-yoga"; import { makeLogger } from "@/lib/logger"; import { context } from "@/omnigraph-api/context"; @@ -16,6 +16,25 @@ export const yoga = createYoga({ context, // CORS is handled by the Hono middleware in app.ts cors: false, + // Error masking: + // - Production: use Yoga defaults so internal details are not exposed to clients. + // - Non-production: still apply the same masked client payload, but log the **original** + // error server-side first. This makes debugging much easier than only seeing the masked + // message, while keeping the client-facing behavior aligned with production. + // + // Motivation: some resolvers intentionally throw `GraphQLError` (e.g. validation for + // `Query.labels`), but other code paths may throw plain `Error`. Yoga's default `maskError` + // maps unknown errors to a generic "Unexpected error." 
on the client; logging here ensures + // the real stack/message is still visible in local/staging logs. + maskedErrors: + process.env.NODE_ENV === "production" + ? true + : { + maskError(error, message, isDev) { + logger.error(error); + return maskError(error, message, isDev); + }, + }, graphiql: { defaultQuery: `query DomainsByOwner { account(by: { address: "0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266" }) { diff --git a/apps/ensrainbow/src/commands/entrypoint-command.ts b/apps/ensrainbow/src/commands/entrypoint-command.ts index a45e6e9957..fa24742367 100644 --- a/apps/ensrainbow/src/commands/entrypoint-command.ts +++ b/apps/ensrainbow/src/commands/entrypoint-command.ts @@ -351,10 +351,18 @@ async function runDbBootstrap( throw new BootstrapAbortedError(); } - // Write marker only after a successful attach. + const dbConfig = await buildDbConfig(ensRainbowServer); + + if (signal.aborted) { + throw new BootstrapAbortedError(); + } + + // Write marker only after a successful attach AND a successful `buildDbConfig` so the + // next start does not enter the existing-DB reuse path with a database that has never + // passed readiness checks (e.g. missing precalculated record count). await writeFile(markerFile, ""); - return await buildDbConfig(ensRainbowServer); + return dbConfig; } catch (error) { if (!dbAttached) { await safeClose(db); diff --git a/apps/ensrainbowbeam/.env.local.example b/apps/ensrainbowbeam/.env.local.example new file mode 100644 index 0000000000..11d0bf219d --- /dev/null +++ b/apps/ensrainbowbeam/.env.local.example @@ -0,0 +1,11 @@ +# Port EnsRainbowBeam listens on. +PORT=4444 + +# Base URL of an ENSNode (ENSApi) instance that exposes the Omnigraph GraphQL endpoint. +# EnsRainbowBeam calls `${ENSNODE_URL}/api/omnigraph` to classify submitted labels. +ENSNODE_URL=http://localhost:4334 + +# Comma-separated allowlist of CORS origins for browser calls. 
+# Example for local dev + prod marketing site: +# CORS_ORIGINS=http://localhost:4321,https://ensrainbow.io +CORS_ORIGINS=http://localhost:4321 diff --git a/apps/ensrainbowbeam/Dockerfile b/apps/ensrainbowbeam/Dockerfile new file mode 100644 index 0000000000..b6f0394d04 --- /dev/null +++ b/apps/ensrainbowbeam/Dockerfile @@ -0,0 +1,17 @@ +FROM node:24-slim AS base +ENV PNPM_HOME="/pnpm" +ENV PATH="$PNPM_HOME:$PATH" +RUN apt-get update && apt-get install -y curl && rm -rf /var/lib/apt/lists/* +RUN corepack enable +WORKDIR /app + +FROM base AS deps +COPY . . +RUN --mount=type=cache,id=pnpm,target=/pnpm/store pnpm install --frozen-lockfile + +FROM deps AS runner +WORKDIR /app/apps/ensrainbowbeam +ENV NODE_ENV=production +EXPOSE 4444 + +CMD ["pnpm", "start"] diff --git a/apps/ensrainbowbeam/LICENSE b/apps/ensrainbowbeam/LICENSE new file mode 100644 index 0000000000..0d70998c14 --- /dev/null +++ b/apps/ensrainbowbeam/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2026 NameHash + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/apps/ensrainbowbeam/README.md b/apps/ensrainbowbeam/README.md new file mode 100644 index 0000000000..6803b55420 --- /dev/null +++ b/apps/ensrainbowbeam/README.md @@ -0,0 +1,54 @@ +# EnsRainbowBeam + +Workspace package: `ensrainbowbeam` (`apps/ensrainbowbeam`). + +Receives ENS label submissions from external callers, classifies each label against ENSNode's +indexed Label table, and (for now) emits a structured JSON line per submission to stdout. + +The app is intentionally minimal; persistent storage, batched on-chain emission, and a +caller-leaderboard are explicitly deferred to follow-up work (see GitHub issue +[#2003](https://github.com/namehash/ensnode/issues/2003)). The submission JSONL shape is the +future row shape so adding a sink later is mechanical. + +## Endpoints + +- `GET /health` — liveness probe; always returns `{ message: "ok" }`. +- `POST /api/discover` — accepts `{ labels: string[], callerAddress: Address }` and responds + with per-label classification (`unknown_in_index` / `healed_in_index` / `absent_from_index`). + +## How label classification works + +For each submitted raw label EnsRainbowBeam: + +1. Computes `labelhashLiteralLabel(asLiteralLabel(rawLabel))`. +2. If the label is normalizable AND the normalized form differs from the raw label, also + computes `labelhashLiteralLabel` for that normalized literal (still a literal-string path — + input is never treated as an Encoded LabelHash before hashing). +3. Sends every distinct LabelHash to ENSNode via the typed `enssdk/omnigraph` client using + the `labels(by: { labelHashes })` query (batched when a submission exceeds the Omnigraph + per-request cap). +4. 
Classifies each submitted label: + - `unknown_in_index` — at least one of its hashes is present in the index but not yet + healed (i.e. `interpreted` is the encoded labelhash form). These are the interesting + submissions for future on-chain emission. + - `healed_in_index` — at least one of its hashes is present in the index and all + returned hits are already healed. + - `absent_from_index` — none of its hashes are present in the index. + +## Configuration + +| Env var | Required | Description | +|---------|----------|-------------| +| `PORT` | no (default `4444`) | HTTP listen port. | +| `ENSNODE_URL` | yes | Base URL of an ENSNode (ENSApi) instance with Omnigraph at `/api/omnigraph`. | +| `CORS_ORIGINS` | no | Comma-separated allowlist of CORS origins (e.g. `https://ensrainbow.io,http://localhost:4321`). When unset/empty, no cross-origin requests are allowed. | + +See `.env.local.example` for a local-development template. + +## Development + +```bash +npx pnpm@10.13.1 -F ensrainbowbeam dev +npx pnpm@10.13.1 -F ensrainbowbeam typecheck +npx pnpm@10.13.1 -F ensrainbowbeam test +``` diff --git a/apps/ensrainbowbeam/package.json b/apps/ensrainbowbeam/package.json new file mode 100644 index 0000000000..704339a743 --- /dev/null +++ b/apps/ensrainbowbeam/package.json @@ -0,0 +1,38 @@ +{ + "private": true, + "name": "ensrainbowbeam", + "version": "1.10.1", + "type": "module", + "description": "EnsRainbowBeam — ingests label submissions and classifies them against ENSNode's Omnigraph index", + "license": "MIT", + "repository": { + "type": "git", + "url": "git+https://github.com/namehash/ensnode.git", + "directory": "apps/ensrainbowbeam" + }, + "homepage": "https://github.com/namehash/ensnode/tree/main/apps/ensrainbowbeam", + "scripts": { + "start": "tsx src/index.ts", + "dev": "tsx watch --env-file ./.env.local src/index.ts", + "test": "vitest", + "lint": "biome check --write .", + "lint:ci": "biome ci", + "typecheck": "tsgo --noEmit" + }, + "dependencies": { + 
"@ensnode/ensnode-sdk": "workspace:*", + "@hono/node-server": "catalog:", + "enssdk": "workspace:*", + "graphql": "^16.11.0", + "hono": "catalog:", + "viem": "catalog:", + "zod": "catalog:" + }, + "devDependencies": { + "@ensnode/shared-configs": "workspace:*", + "@types/node": "catalog:", + "tsx": "^4.19.3", + "typescript": "catalog:", + "vitest": "catalog:" + } +} diff --git a/apps/ensrainbowbeam/src/app.ts b/apps/ensrainbowbeam/src/app.ts new file mode 100644 index 0000000000..a1974a66b3 --- /dev/null +++ b/apps/ensrainbowbeam/src/app.ts @@ -0,0 +1,33 @@ +import { config } from "@/config"; + +import { Hono } from "hono"; +import { cors } from "hono/cors"; + +import { healthHandler } from "@/handlers/health"; +import { submissionsHandler } from "@/handlers/submissions"; +import { errorResponse } from "@/lib/error-response"; + +const app = new Hono(); + +app.use( + cors({ + origin: (origin) => (config.corsOrigins.includes(origin) ? origin : undefined), + allowMethods: ["GET", "POST", "OPTIONS"], + allowHeaders: ["Content-Type"], + maxAge: 86400, + }), +); + +app.get("/health", healthHandler); + +app.post("/api/discover", submissionsHandler); + +app.notFound((c) => errorResponse(c, { message: "Not Found", status: 404 })); + +app.onError((error, c) => { + console.error("[ensrainbowbeam] unhandled error", error); + // Do not leak the underlying error message to clients; respond with a generic 500. + return errorResponse(c, { message: "Internal Server Error", status: 500 }); +}); + +export default app; diff --git a/apps/ensrainbowbeam/src/config.ts b/apps/ensrainbowbeam/src/config.ts new file mode 100644 index 0000000000..af57cfd91d --- /dev/null +++ b/apps/ensrainbowbeam/src/config.ts @@ -0,0 +1,37 @@ +import { z } from "zod/v4"; + +import { OptionalPortNumberSchema } from "@ensnode/ensnode-sdk/internal"; + +/** + * Default port for EnsRainbowBeam. Used when `PORT` env var is unset. 
+ */ +export const ENSRAINBOWBEAM_DEFAULT_PORT = 4444; + +function parseCorsOrigins(value: string | undefined): string[] { + if (!value) return []; + return value + .split(",") + .map((origin) => origin.trim()) + .filter((origin) => origin.length > 0); +} + +const ConfigSchema = z.object({ + PORT: OptionalPortNumberSchema.default(ENSRAINBOWBEAM_DEFAULT_PORT), + ENSNODE_URL: z.string().url(), + CORS_ORIGINS: z.string().optional(), +}); + +const parsed = ConfigSchema.parse(process.env); + +/** + * Process configuration parsed from `process.env` at module load. + * + * Throws (via Zod) if any required env var is missing or invalid. + */ +export const config = { + port: parsed.PORT, + ensNodeUrl: parsed.ENSNODE_URL, + corsOrigins: parseCorsOrigins(parsed.CORS_ORIGINS), +}; + +export type Config = typeof config; diff --git a/apps/ensrainbowbeam/src/handlers/health.ts b/apps/ensrainbowbeam/src/handlers/health.ts new file mode 100644 index 0000000000..28f79d883f --- /dev/null +++ b/apps/ensrainbowbeam/src/handlers/health.ts @@ -0,0 +1,5 @@ +import type { Context } from "hono"; + +export function healthHandler(c: Context) { + return c.json({ message: "ok" }); +} diff --git a/apps/ensrainbowbeam/src/handlers/submissions.test.ts b/apps/ensrainbowbeam/src/handlers/submissions.test.ts new file mode 100644 index 0000000000..15eb987cbe --- /dev/null +++ b/apps/ensrainbowbeam/src/handlers/submissions.test.ts @@ -0,0 +1,263 @@ +import { + encodeLabelHash, + type InterpretedLabel, + type LabelHash, + type LiteralLabel, + labelhashLiteralLabel, +} from "enssdk"; +import { Hono } from "hono"; +import { afterAll, beforeEach, describe, expect, it, vi } from "vitest"; + +vi.mock("@/lib/omnigraph-client", () => ({ + lookupLabels: vi.fn(), +})); + +import { errorResponse } from "@/lib/error-response"; +import type { LabelHit } from "@/lib/labels"; +import { lookupLabels } from "@/lib/omnigraph-client"; + +import { + OMNIGRAPH_LOOKUP_TIMEOUT_MS, + type SubmissionsResponse, + 
submissionsHandler, +} from "./submissions"; + +const mockedLookup = vi.mocked(lookupLabels); + +function makeApp() { + const app = new Hono(); + app.post("/api/discover", submissionsHandler); + app.onError((error, c) => errorResponse(c, { error })); + return app; +} + +const CALLER = "0x1234567890123456789012345678901234567890"; + +describe("POST /api/discover", () => { + const consoleSpy = vi.spyOn(console, "log").mockImplementation(() => {}); + const consoleErrorSpy = vi.spyOn(console, "error").mockImplementation(() => {}); + + beforeEach(() => { + mockedLookup.mockReset(); + consoleSpy.mockClear(); + consoleErrorSpy.mockClear(); + }); + + afterAll(() => { + consoleSpy.mockRestore(); + consoleErrorSpy.mockRestore(); + }); + + it("400s on malformed JSON", async () => { + const app = makeApp(); + const res = await app.request("/api/discover", { + method: "POST", + headers: { "Content-Type": "application/json" }, + body: "not-json", + }); + expect(res.status).toBe(400); + expect(mockedLookup).not.toHaveBeenCalled(); + }); + + it("400s when labels is empty", async () => { + const app = makeApp(); + mockedLookup.mockResolvedValue([]); + const res = await app.request("/api/discover", { + method: "POST", + headers: { "Content-Type": "application/json" }, + body: JSON.stringify({ labels: [], callerAddress: CALLER }), + }); + expect(res.status).toBe(400); + expect(mockedLookup).not.toHaveBeenCalled(); + }); + + it("400s when callerAddress is not a valid EVM address", async () => { + const app = makeApp(); + const res = await app.request("/api/discover", { + method: "POST", + headers: { "Content-Type": "application/json" }, + body: JSON.stringify({ labels: ["foo"], callerAddress: "not-an-address" }), + }); + expect(res.status).toBe(400); + expect(mockedLookup).not.toHaveBeenCalled(); + }); + + it("classifies a healed label correctly and emits exactly one log line", async () => { + const ethHash = labelhashLiteralLabel("eth" as LiteralLabel); + 
mockedLookup.mockResolvedValue([ + { hash: ethHash, interpreted: "eth" as InterpretedLabel } satisfies LabelHit, + ]); + + const app = makeApp(); + const res = await app.request("/api/discover", { + method: "POST", + headers: { "Content-Type": "application/json" }, + body: JSON.stringify({ labels: ["eth"], callerAddress: CALLER }), + }); + + expect(res.status).toBe(200); + const json = (await res.json()) as SubmissionsResponse; + expect(json.callerAddress).toBe(CALLER); + expect(json.results).toHaveLength(1); + expect(json.results[0]).toMatchObject({ + rawLabel: "eth", + labelHash: ethHash, + status: "healed_in_index", + }); + + expect(consoleSpy).toHaveBeenCalledTimes(1); + const [loggedLine] = consoleSpy.mock.calls[0] as [string]; + const parsed = JSON.parse(loggedLine) as { + ts: string; + requestId: string; + callerAddress: string; + items: Array<{ status: string }>; + }; + expect(parsed.callerAddress).toBe(CALLER); + expect(parsed.items[0].status).toBe("healed_in_index"); + }); + + it("classifies an unhealed label as unknown_in_index", async () => { + const fooHash = labelhashLiteralLabel("foo" as LiteralLabel); + mockedLookup.mockResolvedValue([ + { hash: fooHash, interpreted: encodeLabelHash(fooHash) as InterpretedLabel }, + ]); + + const app = makeApp(); + const res = await app.request("/api/discover", { + method: "POST", + headers: { "Content-Type": "application/json" }, + body: JSON.stringify({ labels: ["foo"], callerAddress: CALLER }), + }); + + expect(res.status).toBe(200); + const json = (await res.json()) as SubmissionsResponse; + expect(json.results[0].status).toBe("unknown_in_index"); + }); + + it("classifies an absent label as absent_from_index", async () => { + mockedLookup.mockResolvedValue([]); + + const app = makeApp(); + const res = await app.request("/api/discover", { + method: "POST", + headers: { "Content-Type": "application/json" }, + body: JSON.stringify({ + labels: ["zzznever-existszzz"], + callerAddress: CALLER, + }), + }); + + 
expect(res.status).toBe(200); + const json = (await res.json()) as SubmissionsResponse; + expect(json.results[0].status).toBe("absent_from_index"); + }); + + it("normalizes the callerAddress to lowercase in both the response and the log", async () => { + mockedLookup.mockResolvedValue([]); + const mixedCase = "0xAaAaAaAaAaAaAaAaAaAaAaAaAaAaAaAaAaAaAaAa"; + + const app = makeApp(); + const res = await app.request("/api/discover", { + method: "POST", + headers: { "Content-Type": "application/json" }, + body: JSON.stringify({ labels: ["foo"], callerAddress: mixedCase }), + }); + + expect(res.status).toBe(200); + const json = (await res.json()) as SubmissionsResponse; + expect(json.callerAddress).toBe(mixedCase.toLowerCase()); + + const [loggedLine] = consoleSpy.mock.calls[0] as [string]; + const parsed = JSON.parse(loggedLine) as { callerAddress: string }; + expect(parsed.callerAddress).toBe(mixedCase.toLowerCase()); + }); + + it("includes normalized variants in the output when raw differs from normalized", async () => { + mockedLookup.mockResolvedValue([]); + + const app = makeApp(); + const res = await app.request("/api/discover", { + method: "POST", + headers: { "Content-Type": "application/json" }, + body: JSON.stringify({ labels: ["VITALIK"], callerAddress: CALLER }), + }); + + expect(res.status).toBe(200); + const json = (await res.json()) as SubmissionsResponse; + expect(json.results[0]).toMatchObject({ + rawLabel: "VITALIK", + normalizedLabel: "vitalik", + }); + expect(json.results[0].normalizedLabelHash).toBeDefined(); + }); + + it("rejects oversized batches", async () => { + const labels = Array.from({ length: 101 }, (_, i) => `label-${i}`); + + const app = makeApp(); + const res = await app.request("/api/discover", { + method: "POST", + headers: { "Content-Type": "application/json" }, + body: JSON.stringify({ labels, callerAddress: CALLER }), + }); + expect(res.status).toBe(400); + expect(mockedLookup).not.toHaveBeenCalled(); + }); + + it("returns 504 when 
the omnigraph lookup times out", async () => { + mockedLookup.mockRejectedValue(new DOMException("The operation timed out.", "TimeoutError")); + + const app = makeApp(); + const res = await app.request("/api/discover", { + method: "POST", + headers: { "Content-Type": "application/json" }, + body: JSON.stringify({ labels: ["foo"], callerAddress: CALLER }), + }); + + expect(res.status).toBe(504); + const json = (await res.json()) as { message: string }; + expect(json.message).toBe( + `Omnigraph labels lookup timed out after ${OMNIGRAPH_LOOKUP_TIMEOUT_MS}ms`, + ); + expect(consoleSpy).not.toHaveBeenCalled(); + expect(consoleErrorSpy).toHaveBeenCalled(); + }); + + it("returns 502 when the omnigraph lookup fails with a generic error", async () => { + mockedLookup.mockRejectedValue(new Error("upstream exploded")); + + const app = makeApp(); + const res = await app.request("/api/discover", { + method: "POST", + headers: { "Content-Type": "application/json" }, + body: JSON.stringify({ labels: ["foo"], callerAddress: CALLER }), + }); + + expect(res.status).toBe(502); + const json = (await res.json()) as { message: string }; + expect(json.message).toBe("Upstream Omnigraph lookup failed"); + // The underlying error must be logged (not swallowed) so 502s are debuggable, while + // the response itself stays generic and does not leak upstream details. 
+ expect(consoleErrorSpy).toHaveBeenCalled(); + expect(consoleSpy).not.toHaveBeenCalled(); + }); + + it("dedupes labelhashes before calling the omnigraph client", async () => { + mockedLookup.mockResolvedValue([]); + + const app = makeApp(); + const res = await app.request("/api/discover", { + method: "POST", + headers: { "Content-Type": "application/json" }, + body: JSON.stringify({ labels: ["foo", "foo", "foo"], callerAddress: CALLER }), + }); + + expect(res.status).toBe(200); + expect(mockedLookup).toHaveBeenCalledTimes(1); + + const fooHash = labelhashLiteralLabel("foo" as LiteralLabel); + const passedHashes = mockedLookup.mock.calls[0][0] as readonly LabelHash[]; + expect(new Set(passedHashes)).toEqual(new Set([fooHash])); + }); +}); diff --git a/apps/ensrainbowbeam/src/handlers/submissions.ts b/apps/ensrainbowbeam/src/handlers/submissions.ts new file mode 100644 index 0000000000..f2be9e3974 --- /dev/null +++ b/apps/ensrainbowbeam/src/handlers/submissions.ts @@ -0,0 +1,158 @@ +import { type Address, asLiteralLabel, type LabelHash, type LiteralLabel } from "enssdk"; +import type { Context } from "hono"; +import { isAddress } from "viem"; +import { z } from "zod/v4"; + +import { errorResponse } from "@/lib/error-response"; +import { + classifySubmissions, + collectLookupHashes, + hashLabel, + type LabelClassification, + type LabelHit, +} from "@/lib/labels"; +import { lookupLabels } from "@/lib/omnigraph-client"; + +/** + * Maximum number of raw labels accepted per `POST /api/discover` request. + * + * This is independent of how many labelhashes each label expands into (1 if already + * normalized / unnormalizable, 2 if it has a distinct normalized form). `lookupLabels` + * batches Omnigraph requests against `LABELS_BY_LABELHASH_MAX` (see + * `apps/ensapi/src/omnigraph-api/schema/label.ts`); the worst-case distinct LabelHash count + * per submission is still capped at `2 * MAX_LABELS_PER_SUBMISSION`. 
+ */ +export const MAX_LABELS_PER_SUBMISSION = 100; + +/** + * Hard upper bound on how long a single `POST /api/discover` will wait on the + * Omnigraph labels lookup before failing the request. Prevents a stalled upstream + * from holding handler resources indefinitely. + */ +export const OMNIGRAPH_LOOKUP_TIMEOUT_MS = 10_000; + +const SubmissionsRequestSchema = z.object({ + labels: z + .array(z.string().min(1).max(1000)) + .min(1) + .max(MAX_LABELS_PER_SUBMISSION) + .transform((items) => items.map(asLiteralLabel)), + callerAddress: z + .string() + .refine((value) => isAddress(value, { strict: false }), { + message: "callerAddress must be a valid EVM address", + }) + .transform((value) => value.toLowerCase() as Address), +}); + +export type SubmissionResultItem = { + rawLabel: LiteralLabel; + labelHash: LabelHash; + normalizedLabel?: LiteralLabel; + normalizedLabelHash?: LabelHash; + status: LabelClassification["status"]; +}; + +export type SubmissionsResponse = { + callerAddress: Address; + results: SubmissionResultItem[]; +}; + +/** + * Structured submission record written to stdout as a single JSON line per request. + * + * Intentionally storage-agnostic: the shape is the future DB row shape so swapping the sink + * for Postgres + Drizzle later is a mechanical change. + * + * TODO(#2003): replace stdout sink with persistent store (Postgres + Drizzle) without changing + * this log shape. + * + * TODO(#2003): drain stored submissions on a schedule and submit them in batches; do not + * couple to the request lifecycle. + * + * TODO(#2003): a downstream aggregator can compute leaderboards by `callerAddress` from these + * lines (per-status counts are already implicit in `items[].status`). 
+ */ +type SubmissionLogLine = { + ts: string; + requestId: string; + callerAddress: Address; + items: SubmissionResultItem[]; +}; + +function toResultItem(c: LabelClassification): SubmissionResultItem { + const item: SubmissionResultItem = { + rawLabel: c.rawLabel, + labelHash: c.labelHash, + status: c.status, + }; + if (c.normalizedLabel !== undefined) item.normalizedLabel = c.normalizedLabel; + if (c.normalizedLabelHash !== undefined) item.normalizedLabelHash = c.normalizedLabelHash; + return item; +} + +export async function submissionsHandler(c: Context) { + let body: unknown; + try { + body = await c.req.json(); + } catch { + return errorResponse(c, { message: "Request body must be valid JSON", status: 400 }); + } + + const parsed = SubmissionsRequestSchema.safeParse(body); + if (!parsed.success) { + return errorResponse(c, { error: parsed.error }); + } + + const { labels, callerAddress } = parsed.data; + + const hashed = labels.map(hashLabel); + const hashes = collectLookupHashes(hashed); + + let hits: LabelHit[]; + try { + // `AbortSignal.any` aborts when either the timeout fires or the client disconnects, so + // the upstream HTTP request is cancelled in both cases instead of being left dangling. + const signal = AbortSignal.any([ + AbortSignal.timeout(OMNIGRAPH_LOOKUP_TIMEOUT_MS), + c.req.raw.signal, + ]); + hits = await lookupLabels(hashes, signal); + } catch (error) { + // Client disconnected mid-flight; the response will be discarded by the framework, but + // re-throw so the upstream cancellation is visible in logs (`app.onError`). 
+ if (c.req.raw.signal.aborted) throw error; + if (error instanceof DOMException && error.name === "TimeoutError") { + console.error("[ensrainbowbeam] omnigraph lookup timed out", { + timeoutMs: OMNIGRAPH_LOOKUP_TIMEOUT_MS, + }); + return errorResponse(c, { + message: `Omnigraph labels lookup timed out after ${OMNIGRAPH_LOOKUP_TIMEOUT_MS}ms`, + status: 504, + }); + } + // Log the underlying error so 502s aren't a black box in production. The client + // still sees a generic message (we don't leak upstream error details). + console.error("[ensrainbowbeam] omnigraph lookup failed", error); + return errorResponse(c, { message: "Upstream Omnigraph lookup failed", status: 502 }); + } + const classifications = classifySubmissions(hashed, hits); + const results = classifications.map(toResultItem); + + const ts = new Date().toISOString(); + const requestId = crypto.randomUUID(); + + const logLine: SubmissionLogLine = { + ts, + requestId, + callerAddress, + items: results, + }; + console.log(JSON.stringify(logLine)); + + const response: SubmissionsResponse = { + callerAddress, + results, + }; + return c.json(response); +} diff --git a/apps/ensrainbowbeam/src/index.ts b/apps/ensrainbowbeam/src/index.ts new file mode 100644 index 0000000000..6d77fccc3e --- /dev/null +++ b/apps/ensrainbowbeam/src/index.ts @@ -0,0 +1,70 @@ +import { config } from "@/config"; + +import { serve } from "@hono/node-server"; + +import app from "@/app"; + +const server = serve( + { + fetch: app.fetch, + port: config.port, + }, + (info) => { + // Operational logs go to stderr so stdout stays a clean JSONL stream of submission records + // (see `submissionsHandler` in `src/handlers/submissions.ts`). + console.error(`EnsRainbowBeam listening on port ${info.port}`); + }, +); + +/** + * Promisified `server.close()` that treats `ERR_SERVER_NOT_RUNNING` as a no-op so concurrent + * shutdown paths (e.g. SIGINT immediately followed by SIGTERM) don't reject the second close. 
+ */
+const closeServer = () =>
+  new Promise<void>((resolve, reject) =>
+    server.close((err) => {
+      if (!err) {
+        resolve();
+        return;
+      }
+      if (typeof err === "object" && "code" in err && err.code === "ERR_SERVER_NOT_RUNNING") {
+        resolve();
+        return;
+      }
+      reject(err);
+    }),
+  );
+
+let shutdownPromise: Promise<void> | undefined;
+
+/**
+ * Closes the HTTP server and exits with `exitCode` (default 0).
+ *
+ * Guarded with a single in-flight promise so repeated SIGINT/SIGTERM (or `uncaughtException`
+ * arriving during an in-progress shutdown) never trigger a duplicate close.
+ */
+const gracefulShutdown = (exitCode: number = 0): Promise<void> => {
+  if (!shutdownPromise) {
+    shutdownPromise = closeServer()
+      .catch((error) => {
+        console.error("[ensrainbowbeam] shutdown error", error);
+        exitCode = exitCode === 0 ? 1 : exitCode;
+      })
+      .then(() => {
+        process.exit(exitCode);
+      });
+  }
+  return shutdownPromise;
+};
+
+process.on("SIGINT", () => {
+  void gracefulShutdown(0);
+});
+process.on("SIGTERM", () => {
+  void gracefulShutdown(0);
+});
+
+process.on("uncaughtException", (error) => {
+  console.error("[ensrainbowbeam] uncaughtException", error);
+  void gracefulShutdown(1);
+});
diff --git a/apps/ensrainbowbeam/src/lib/error-response.ts b/apps/ensrainbowbeam/src/lib/error-response.ts
new file mode 100644
index 0000000000..b20735131a
--- /dev/null
+++ b/apps/ensrainbowbeam/src/lib/error-response.ts
@@ -0,0 +1,60 @@
+import type { Context } from "hono";
+import type { ClientErrorStatusCode, ServerErrorStatusCode } from "hono/utils/http-status";
+import { ZodError } from "zod/v4";
+
+/**
+ * Standardized error response shape for EnsRainbowBeam (`ensrainbowbeam`).
+ *
+ * Mirrors the shape used elsewhere in the monorepo (see AGENTS.md "API boundaries").
+ */
+export type ErrorResponseBody = {
+  message: string;
+  details?: unknown;
+};
+
+type ErrorStatus = ClientErrorStatusCode | ServerErrorStatusCode;
+
+/**
+ * Sends a JSON error response with the canonical `{ message, details?
}` shape. + * + * - `ZodError` becomes a 400 with `message: "Invalid Input"` and the flattened Zod issues as `details`. + * - `Error` instances forward their `message`. + * - Anything else becomes a generic `Internal Server Error`. + */ +type ErrorOptions = + | { error: unknown; status?: ErrorStatus; details?: unknown } + | { message: string; status: ErrorStatus; details?: unknown }; + +export function errorResponse( + c: Context, + options: ErrorOptions = { error: new Error("Internal Server Error"), status: 500 }, +) { + if ("message" in options) { + const body: ErrorResponseBody = { message: options.message }; + if (options.details !== undefined) body.details = options.details; + return c.json(body, options.status); + } + + const { error } = options; + let status: ErrorStatus = options.status ?? 500; + let body: ErrorResponseBody; + + if (error instanceof ZodError) { + status = 400; + body = { + message: "Invalid Input", + details: options.details ?? error.flatten(), + }; + } else if (error instanceof Error) { + body = { message: error.message }; + if (options.details !== undefined) body.details = options.details; + } else if (typeof error === "string") { + body = { message: error }; + if (options.details !== undefined) body.details = options.details; + } else { + body = { message: "Internal Server Error" }; + if (options.details !== undefined) body.details = options.details; + } + + return c.json(body, status); +} diff --git a/apps/ensrainbowbeam/src/lib/labels.test.ts b/apps/ensrainbowbeam/src/lib/labels.test.ts new file mode 100644 index 0000000000..70c777c866 --- /dev/null +++ b/apps/ensrainbowbeam/src/lib/labels.test.ts @@ -0,0 +1,156 @@ +import { + asLiteralLabel, + encodeLabelHash, + type InterpretedLabel, + type LabelHash, + type LiteralLabel, + labelhashLiteralLabel, +} from "enssdk"; +import { describe, expect, it } from "vitest"; + +import { + classifySubmissions, + collectLookupHashes, + hashLabel, + isUnhealedHit, + type LabelHit, +} from "./labels"; 
+ +const literal = (s: string) => s as LiteralLabel; + +describe("hashLabel", () => { + it("computes labelhash for a normalized lowercase label", () => { + const result = hashLabel(literal("vitalik")); + expect(result).toEqual({ + rawLabel: literal("vitalik"), + labelHash: labelhashLiteralLabel(literal("vitalik")), + }); + }); + + it("does not populate normalizedLabel when raw equals normalized", () => { + const result = hashLabel(literal("eth")); + expect(result.normalizedLabel).toBeUndefined(); + expect(result.normalizedLabelHash).toBeUndefined(); + }); + + it("populates normalizedLabel + hash when uppercase label normalizes to lowercase", () => { + const result = hashLabel(literal("VITALIK")); + expect(result.rawLabel).toBe(literal("VITALIK")); + expect(result.labelHash).toBe(labelhashLiteralLabel(literal("VITALIK"))); + expect(result.normalizedLabel).toBe(literal("vitalik")); + expect(result.normalizedLabelHash).toBe(labelhashLiteralLabel(literal("vitalik"))); + expect(result.normalizedLabelHash).not.toBe(result.labelHash); + }); + + it("tolerates unnormalizable labels (e.g. 
labels with periods)", () => { + const result = hashLabel(literal("foo.bar")); + expect(result.rawLabel).toBe(literal("foo.bar")); + expect(result.labelHash).toBe(labelhashLiteralLabel(literal("foo.bar"))); + expect(result.normalizedLabel).toBeUndefined(); + expect(result.normalizedLabelHash).toBeUndefined(); + }); + + it("tolerates the empty string (cannot normalize)", () => { + const result = hashLabel(asLiteralLabel("")); + expect(result.rawLabel).toBe(asLiteralLabel("")); + expect(result.normalizedLabel).toBeUndefined(); + }); + + it("hashes a unicode label", () => { + const label = "vitalik\u00e9"; + const result = hashLabel(asLiteralLabel(label)); + expect(result.labelHash).toBe(labelhashLiteralLabel(asLiteralLabel(label))); + }); +}); + +describe("collectLookupHashes", () => { + it("returns the deduped union of raw + normalized labelhashes", () => { + const a = hashLabel(literal("VITALIK")); + const b = hashLabel(literal("vitalik")); + const hashes = collectLookupHashes([a, b]); + expect(hashes).toHaveLength(2); + expect(new Set(hashes).size).toBe(hashes.length); + expect(hashes).toContain(a.labelHash); + expect(hashes).toContain(b.labelHash); + }); + + it("ignores undefined normalized hashes", () => { + const a = hashLabel(literal("eth")); + const hashes = collectLookupHashes([a]); + expect(hashes).toEqual([a.labelHash]); + }); +}); + +describe("isUnhealedHit", () => { + it("returns true when interpreted equals encodeLabelHash(hash)", () => { + const hash = labelhashLiteralLabel(literal("xyz")); + const hit: LabelHit = { hash, interpreted: encodeLabelHash(hash) as InterpretedLabel }; + expect(isUnhealedHit(hit)).toBe(true); + }); + + it("returns false when interpreted is a healed literal", () => { + const hash = labelhashLiteralLabel(literal("vitalik")); + const hit: LabelHit = { hash, interpreted: "vitalik" as InterpretedLabel }; + expect(isUnhealedHit(hit)).toBe(false); + }); +}); + +describe("classifySubmissions", () => { + const vitalik = 
hashLabel(literal("vitalik")); + const eth = hashLabel(literal("eth")); + const upper = hashLabel(literal("HELLO")); + const random = hashLabel(literal("zzzdoesnotexistzzz")); + + function makeHealedHit(hash: LabelHash, label: string): LabelHit { + return { hash, interpreted: label as InterpretedLabel }; + } + + function makeUnhealedHit(hash: LabelHash): LabelHit { + return { hash, interpreted: encodeLabelHash(hash) as InterpretedLabel }; + } + + it("classifies absent labels as absent_from_index", () => { + const result = classifySubmissions([random], []); + expect(result).toEqual([{ ...random, status: "absent_from_index" }]); + }); + + it("classifies a healed-only hit as healed_in_index", () => { + const result = classifySubmissions([vitalik], [makeHealedHit(vitalik.labelHash, "vitalik")]); + expect(result[0].status).toBe("healed_in_index"); + }); + + it("classifies an unhealed hit as unknown_in_index", () => { + const result = classifySubmissions([vitalik], [makeUnhealedHit(vitalik.labelHash)]); + expect(result[0].status).toBe("unknown_in_index"); + }); + + it("classifies as unknown_in_index when ANY of the label's hashes is unhealed", () => { + // upper has both raw + normalized hashes; if normalized is unhealed but raw is healed, + // the submission should still be unknown_in_index. 
+ if (upper.normalizedLabelHash === undefined) { + throw new Error("test fixture invariant: 'HELLO' must produce a normalized variant"); + } + const result = classifySubmissions( + [upper], + [makeHealedHit(upper.labelHash, upper.rawLabel), makeUnhealedHit(upper.normalizedLabelHash)], + ); + expect(result[0].status).toBe("unknown_in_index"); + }); + + it("handles a mixed batch", () => { + const result = classifySubmissions( + [vitalik, eth, random], + [ + makeUnhealedHit(vitalik.labelHash), + makeHealedHit(eth.labelHash, "eth"), + // random has no hit + ], + ); + + expect(result.map((r) => r.status)).toEqual([ + "unknown_in_index", + "healed_in_index", + "absent_from_index", + ]); + }); +}); diff --git a/apps/ensrainbowbeam/src/lib/labels.ts b/apps/ensrainbowbeam/src/lib/labels.ts new file mode 100644 index 0000000000..1412492f52 --- /dev/null +++ b/apps/ensrainbowbeam/src/lib/labels.ts @@ -0,0 +1,139 @@ +import { + asLiteralLabel, + encodeLabelHash, + type InterpretedLabel, + type Label, + type LabelHash, + type LiteralLabel, + labelhashLiteralLabel, + normalizeLabel, +} from "enssdk"; + +/** + * Per-label classification status against the ENSNode index. + * + * - `unknown_in_index`: at least one of the label's hashes is present in the index but its + * interpreted form is the encoded labelhash (i.e. the literal label has not yet been healed). + * These submissions are the interesting candidates for future on-chain emission. + * - `healed_in_index`: at least one of the label's hashes is present in the index and every + * returned hit already carries a healed (normalized literal) interpreted form. + * - `absent_from_index`: none of the label's hashes are present in the index at all. + */ +export type LabelStatus = "unknown_in_index" | "healed_in_index" | "absent_from_index"; + +/** + * The hashing result for a single submitted label. 
+ * + * `rawLabel` is always a {@link LiteralLabel}: submissions are never typed or coerced as + * {@link InterpretedLabel}, so unnormalized discovery remains representable. + * + * `normalizedLabel` (and its hash) are populated only when the label is normalizable under + * ENSIP-15 and the normalized form differs from the raw literal. That branch still hashes via + * {@link labelhashLiteralLabel} on a new {@link LiteralLabel} cast of the normalized string. + */ +export type HashedLabel = { + rawLabel: LiteralLabel; + labelHash: LabelHash; + normalizedLabel?: LiteralLabel; + normalizedLabelHash?: LabelHash; +}; + +/** + * Per-label classification entry returned to the caller and emitted to the stdout sink. + */ +export type LabelClassification = HashedLabel & { + status: LabelStatus; +}; + +/** + * Subset of `Label` fields returned by the Omnigraph `labels` query that we care about. + */ +export type LabelHit = { + hash: LabelHash; + interpreted: InterpretedLabel; +}; + +/** + * Computes the hash representations of a single submitted {@link LiteralLabel}. + * + * Always computes `labelHash = labelhashLiteralLabel(rawLabel)`. If the label normalizes under + * ENSIP-15 to a **different string** than the submission, also computes hashes for that + * normalized form as a distinct {@link LiteralLabel}. Normalization failures are tolerated and + * treated as "no normalized variant". + */ +export function hashLabel(rawLabel: LiteralLabel): HashedLabel { + const labelHash = labelhashLiteralLabel(rawLabel); + + let normalizedLabel: LiteralLabel | undefined; + let normalizedLabelHash: LabelHash | undefined; + try { + const normalizedInterpreted = normalizeLabel(rawLabel); + // Compare as unbranded labels: normalization yields InterpretedLabel; submission is LiteralLabel. 
+    if ((normalizedInterpreted as Label) !== (rawLabel as Label)) {
+      normalizedLabel = asLiteralLabel(normalizedInterpreted);
+      normalizedLabelHash = labelhashLiteralLabel(normalizedLabel);
+    }
+  } catch {
+    // unnormalizable raw label is expected; leave normalized variant undefined
+  }
+
+  const result: HashedLabel = { rawLabel, labelHash };
+  if (normalizedLabel !== undefined) {
+    result.normalizedLabel = normalizedLabel;
+    result.normalizedLabelHash = normalizedLabelHash;
+  }
+  return result;
+}
+
+/**
+ * Returns the deduped flat list of labelhashes we want to look up via the Omnigraph
+ * `labels(by: { labelHashes })` query.
+ */
+export function collectLookupHashes(hashed: readonly HashedLabel[]): LabelHash[] {
+  const set = new Set<LabelHash>();
+  for (const item of hashed) {
+    set.add(item.labelHash);
+    if (item.normalizedLabelHash !== undefined) set.add(item.normalizedLabelHash);
+  }
+  return Array.from(set);
+}
+
+/**
+ * True when an Omnigraph `Label` row represents an unhealed/unknown label
+ * (i.e. its `interpreted` form is the Encoded LabelHash of its `hash`).
+ */
+export function isUnhealedHit(hit: LabelHit): boolean {
+  return hit.interpreted === encodeLabelHash(hit.hash);
+}
+
+/**
+ * Joins per-label hashes against the omnigraph hits and assigns a {@link LabelStatus} to each
+ * submitted raw label.
+ */
+export function classifySubmissions(
+  hashed: readonly HashedLabel[],
+  hits: readonly LabelHit[],
+): LabelClassification[] {
+  const hitsByHash = new Map<LabelHash, LabelHit>();
+  for (const hit of hits) hitsByHash.set(hit.hash, hit);
+
+  return hashed.map((item) => {
+    const candidateHashes: LabelHash[] = [item.labelHash];
+    if (item.normalizedLabelHash !== undefined) candidateHashes.push(item.normalizedLabelHash);
+
+    const matchedHits = candidateHashes
+      .map((h) => hitsByHash.get(h))
+      .filter((h): h is LabelHit => h !== undefined);
+
+    let status: LabelStatus;
+    if (matchedHits.length === 0) {
+      status = "absent_from_index";
+    } else if (matchedHits.some(isUnhealedHit)) {
+      status = "unknown_in_index";
+    } else {
+      status = "healed_in_index";
+    }
+
+    return { ...item, status };
+  });
+}
diff --git a/apps/ensrainbowbeam/src/lib/omnigraph-client.ts b/apps/ensrainbowbeam/src/lib/omnigraph-client.ts
new file mode 100644
index 0000000000..859981a814
--- /dev/null
+++ b/apps/ensrainbowbeam/src/lib/omnigraph-client.ts
@@ -0,0 +1,73 @@
+import { config } from "@/config";
+
+import { type LabelHash, OMNIGRAPH_LABELS_BY_LABELHASH_MAX } from "enssdk";
+import { createEnsNodeClient } from "enssdk/core";
+import { graphql, omnigraph } from "enssdk/omnigraph";
+
+import type { LabelHit } from "@/lib/labels";
+
+/**
+ * Must equal `LABELS_BY_LABELHASH_MAX` in `apps/ensapi/src/omnigraph-api/schema/label.ts`.
+ * EnsRainbowBeam chunks Omnigraph requests so a single submission can exceed this cap.
+ */
+const OMNIGRAPH_LABEL_LOOKUP_BATCH_SIZE = OMNIGRAPH_LABELS_BY_LABELHASH_MAX;
+
+/**
+ * Typed document for the `labels(by: { labelHashes })` Omnigraph query.
+ *
+ * Variable + result types are derived from the generated introspection in `enssdk/omnigraph`,
+ * so changes to the schema break this call site at typecheck time.
+ */
+export const LabelsByLabelHash = graphql(`
+  query LabelsByLabelHash($labelHashes: [LabelHash!]!)
 {
+    labels(by: { labelHashes: $labelHashes }) {
+      hash
+      interpreted
+    }
+  }
+`);
+
+const client = createEnsNodeClient({ url: config.ensNodeUrl }).extend(omnigraph);
+
+/**
+ * Looks up Labels by a batch of LabelHashes against ENSNode's Omnigraph.
+ *
+ * The Omnigraph resolver enforces a hard cap on how many LabelHashes a single query may carry
+ * (`OMNIGRAPH_LABELS_BY_LABELHASH_MAX`). When the caller provides
+ * more (e.g. a full submission expanding to up to 200 distinct LabelHashes), this function
+ * automatically issues multiple batched requests and merges results.
+ *
+ * Pass an optional `signal` to forward request cancellation (e.g. handler timeout, client
+ * disconnect) to the underlying HTTP requests issued by the Omnigraph SDK.
+ */
+export async function lookupLabels(
+  labelHashes: readonly LabelHash[],
+  signal?: AbortSignal,
+): Promise<LabelHit[]> {
+  if (labelHashes.length === 0) return [];
+
+  const chunks: LabelHash[][] = [];
+  for (let i = 0; i < labelHashes.length; i += OMNIGRAPH_LABEL_LOOKUP_BATCH_SIZE) {
+    chunks.push(labelHashes.slice(i, i + OMNIGRAPH_LABEL_LOOKUP_BATCH_SIZE) as LabelHash[]);
+  }
+
+  const results = await Promise.all(
+    chunks.map((batch) =>
+      client.omnigraph.query({
+        query: LabelsByLabelHash,
+        variables: { labelHashes: [...batch] },
+        signal,
+      }),
+    ),
+  );
+
+  for (const result of results) {
+    if (result.errors && result.errors.length > 0) {
+      throw new Error(
+        `Omnigraph labels query returned errors: ${result.errors.map((e) => e.message).join("; ")}`,
+      );
+    }
+  }
+
+  return results.flatMap((r) => r.data?.labels ??
[]); +} diff --git a/apps/ensrainbowbeam/tsconfig.json b/apps/ensrainbowbeam/tsconfig.json new file mode 100644 index 0000000000..a70f8cfaeb --- /dev/null +++ b/apps/ensrainbowbeam/tsconfig.json @@ -0,0 +1,11 @@ +{ + "extends": "@ensnode/shared-configs/tsconfig.lib.json", + "compilerOptions": { + "target": "esnext", + "paths": { + "@/*": ["./src/*"] + } + }, + "include": ["./**/*.ts"], + "exclude": ["node_modules"] +} diff --git a/apps/ensrainbowbeam/vitest.config.ts b/apps/ensrainbowbeam/vitest.config.ts new file mode 100644 index 0000000000..91484cc733 --- /dev/null +++ b/apps/ensrainbowbeam/vitest.config.ts @@ -0,0 +1,15 @@ +import { resolve } from "node:path"; + +import { configDefaults, defineProject } from "vitest/config"; + +export default defineProject({ + resolve: { + alias: { + "@": resolve(__dirname, "./src"), + }, + }, + test: { + name: "ensrainbowbeam", + exclude: [...configDefaults.exclude, "**/*.integration.test.ts"], + }, +}); diff --git a/docker/README.md b/docker/README.md index 03ad9754a7..dce7268f5a 100644 --- a/docker/README.md +++ b/docker/README.md @@ -15,6 +15,7 @@ All commands are run from the **monorepo root**. | `docker/envs/.env.docker.example` | Example for user-specific config. Copy to `.env.docker.local` for mainnet/sepolia. | | `docker/envs/.env.docker.local` | User config (gitignored). Required for base stack, optional for devnet overrides. | +EnsRainbowBeam (`docker/services/ensrainbowbeam.yml`) is optional: extend that file when you want label-discovery HTTP alongside ENSApi (`ENSNODE_URL` must reach the Omnigraph-capable ENSApi URL). 
> To inspect the fully resolved config for any compose file (resolves all `extends`): > > ``` @@ -78,6 +79,7 @@ pnpm docker:build:ensnode pnpm docker:build:ensindexer pnpm docker:build:ensapi pnpm docker:build:ensrainbow +pnpm docker:build:ensrainbowbeam pnpm docker:build:ensadmin ``` diff --git a/docker/services/ensrainbowbeam.yml b/docker/services/ensrainbowbeam.yml new file mode 100644 index 0000000000..df0b8ddd81 --- /dev/null +++ b/docker/services/ensrainbowbeam.yml @@ -0,0 +1,21 @@ +services: + ensrainbowbeam: + container_name: ensrainbowbeam + image: ghcr.io/namehash/ensnode/ensrainbowbeam:${ENSNODE_VERSION:-1.10.1} + build: + dockerfile: ./apps/ensrainbowbeam/Dockerfile + context: ../.. + ports: + - "4444:4444" + environment: + ENSNODE_URL: http://ensapi:4334 + depends_on: + ensapi: + condition: service_started + healthcheck: + test: ["CMD", "curl", "--fail", "-s", "http://localhost:4444/health"] + interval: 30s + timeout: 10s + retries: 3 + start_period: 30s + start_interval: 1s diff --git a/docs/ensrainbow.io/package.json b/docs/ensrainbow.io/package.json index 08dce6d274..c33491450e 100644 --- a/docs/ensrainbow.io/package.json +++ b/docs/ensrainbow.io/package.json @@ -17,6 +17,7 @@ "dependencies": { "@astrojs/react": "catalog:", "@heroicons/react": "^2.2.0", + "@ensnode/ensrainbow-sdk": "workspace:*", "@namehash/namehash-ui": "workspace:*", "@tailwindcss/vite": "^4.1.15", "astro": "catalog:", diff --git a/docs/ensrainbow.io/src/components/atoms/LearnMoreButton.tsx b/docs/ensrainbow.io/src/components/atoms/LearnMoreButton.tsx index edf5ae41fe..d6106928c0 100644 --- a/docs/ensrainbow.io/src/components/atoms/LearnMoreButton.tsx +++ b/docs/ensrainbow.io/src/components/atoms/LearnMoreButton.tsx @@ -24,7 +24,7 @@ export const LearnMoreButton = ({ className={legacyButtonVariants({ variant: "secondary", size: "medium", - className: cc("max-w-full overflow-x-hidden", styles), + className: cc(["max-w-full overflow-x-hidden", styles]), })} > {text} diff --git 
a/docs/ensrainbow.io/src/components/organisms/HealUnknownName.tsx b/docs/ensrainbow.io/src/components/organisms/HealUnknownName.tsx index c8110b1504..9fe22bfcda 100644 --- a/docs/ensrainbow.io/src/components/organisms/HealUnknownName.tsx +++ b/docs/ensrainbow.io/src/components/organisms/HealUnknownName.tsx @@ -1,3 +1,190 @@ +import { legacyButtonVariants } from "@namehash/namehash-ui/legacy"; +import { useMemo, useState } from "react"; + +import { + EnsRainbowBeamClient, + EnsRainbowBeamHttpError, + validateDiscoverParams, +} from "@ensnode/ensrainbow-sdk/ensrainbowbeam-client"; + +type StatusBadgeProps = { + status: "unknown_in_index" | "healed_in_index" | "absent_from_index"; +}; + +function StatusBadge({ status }: StatusBadgeProps) { + const { label, className } = + status === "unknown_in_index" + ? { label: "Unknown", className: "bg-amber-100 text-amber-900" } + : status === "healed_in_index" + ? { label: "Healed", className: "bg-emerald-100 text-emerald-900" } + : { label: "Absent", className: "bg-gray-100 text-gray-900" }; + + return ( + + {label} + + ); +} + +const DEFAULT_BEAM_URL = "https://beam.ensrainbow.io"; + +function parseTextareaLabels(text: string): string[] { + return text + .split(/\r?\n/) + .map((line) => line.trim()) + .filter((line) => line.length > 0); +} + export default function HealUnknownName() { - return

HEAL UNKNOWN NAME

; + const [rawLabels, setRawLabels] = useState(""); + const [callerAddress, setCallerAddress] = useState(""); + const [beamUrl, setBeamUrl] = useState(DEFAULT_BEAM_URL); + + const [isSubmitting, setIsSubmitting] = useState(false); + const [error, setError] = useState(null); + const [results, setResults] = useState< + Array<{ + rawLabel: string; + normalizedLabel?: string; + status: "unknown_in_index" | "healed_in_index" | "absent_from_index"; + }> + >([]); + + const labels = useMemo(() => parseTextareaLabels(rawLabels), [rawLabels]); + + const canSubmit = !isSubmitting && labels.length > 0 && callerAddress.trim().length > 0; + + async function onBeamIt() { + setIsSubmitting(true); + setError(null); + setResults([]); + + try { + const validated = validateDiscoverParams({ labels, callerAddress }); + const client = new EnsRainbowBeamClient({ baseUrl: beamUrl }); + const res = await client.discover({ + labels: validated.labels, + callerAddress: validated.callerAddress, + }); + + setResults( + res.results.map((item) => ({ + rawLabel: item.rawLabel, + normalizedLabel: item.normalizedLabel, + status: item.status, + })), + ); + } catch (err) { + if (err instanceof EnsRainbowBeamHttpError) { + setError(`Beam request failed (${err.status}): ${err.message}`); + } else if (err instanceof Error) { + setError(err.message); + } else { + setError(String(err)); + } + } finally { + setIsSubmitting(false); + } + } + + return ( +
+
+

Beam labels for discovery

+

+ Paste one ENS label per line. We’ll classify each one against ENSNode’s index and tell you + whether it’s already healed, still unknown, or absent. +

+
+ +
+
+ + +