
Lucid SDK Reference

This section provides the API reference for the Lucid SDK and the underlying schemas used for attestation.

Auditor API

lucid_sdk.auditor.Auditor

Bases: ABC

Abstract base class for all Lucid Auditors.

Auditors are the primary units of safety enforcement in the Lucid platform. They execute within Trusted Execution Environments (TEEs) and produce cryptographically signed evidence of their findings.

Attributes:

    auditor_id (str): Unique identifier for the auditor.
    version (str): Protocol version string.
    tee (LucidClient): Client for hardware attestation and secret management.
    verifier_url (str): Endpoint for the Verifier service to send evidence to.

Source code in packages/lucid-sdk/lucid_sdk/auditor.py
class Auditor(ABC):
    """Abstract base class for all Lucid Auditors.

    Auditors are the primary units of safety enforcement in the Lucid platform.
    They execute within Trusted Execution Environments (TEEs) and produce
    cryptographically signed evidence of their findings.

    Attributes:
        auditor_id (str): Unique identifier for the auditor.
        version (str): Protocol version string.
        tee (LucidClient): Client for hardware attestation and secret management.
        verifier_url (str): Endpoint for the Verifier service to send evidence to.
    """
    def __init__(self, auditor_id: str, version: str = "1.0.0", verifier_url: str = None):
        self.auditor_id = auditor_id
        self.version = version
        self.tee = LucidClient()
        self.verifier_url = verifier_url or os.getenv("VERIFIER_URL")

    @abstractmethod
    def check_request(self, request: Any) -> AuditResult:
        """Evaluate an incoming model request.

        Args:
            request: The request payload to audit.

        Returns:
            AuditResult containing the decision.
        """
        pass

    @abstractmethod
    def check_execution(self, context: Any) -> AuditResult:
        """Monitor the model execution process.

        Args:
            context: Execution context (e.g., telemetry indicators).

        Returns:
            AuditResult containing the decision.
        """
        pass

    @abstractmethod
    def check_response(self, response: Any, request: Any = None) -> AuditResult:
        """Evaluate a model generated response.

        Args:
            response: The response payload to audit.
            request: Optional original request for context.

        Returns:
            AuditResult containing the decision.
        """
        pass

    def emit_evidence(self, phase: str, result: AuditResult, request: Any = None):
        """Standard method to create, sign, and send evidence to the Verifier.

        This method wraps the audit result into a Measurement, calls the 
        hardware Attestation Agent to sign it, and pushes it to the Verifier.

        Args:
            phase: The lifecycle phase (artifact, request, execution, response).
            result: The result of the audit.
            request: Optional request object to extract nonces/metadata.
        """
        import httpx

        # Get session/nonce context for freshness
        nonce = None
        if isinstance(request, dict):
            nonce = request.get("nonce")
        elif hasattr(request, "nonce"):
            nonce = getattr(request, "nonce")

        measurement = self.create_measurement(phase, result, nonce=nonce)

        if self.verifier_url:
            try:
                payload = {
                    "session_id": nonce or "default-session",
                    "model_id": os.getenv("MODEL_ID", "default-model"),
                    "measurements": [measurement]
                }
                # Synchronous send to ensure evidence is committed during the call
                from tenacity import retry, stop_after_attempt, wait_exponential

                @retry(
                    stop=stop_after_attempt(3), 
                    wait=wait_exponential(multiplier=1, max=10),
                    reraise=True
                )
                def _send():
                    with httpx.Client() as client:
                        resp = client.post(f"{self.verifier_url}/evidence", json=payload, timeout=5.0)
                        resp.raise_for_status()

                _send()
            except Exception as e:
                logger.error("failed_to_emit_evidence", verifier_url=self.verifier_url, error=str(e))

    def create_measurement(self, phase: str, result: AuditResult, nonce: str = None) -> Dict[str, Any]:
        """Create and sign a Measurement for the given audit result.

        Args:
            phase: The lifecycle phase to record.
            result: The AuditResult to transform into a Measurement.
            nonce: Optional anti-replay nonce.

        Returns:
            Dictionary representation of a signed Measurement.
        """

        # Use the Measurement model to ensure consistency with Verifier
        m = Measurement(
            name=self.auditor_id,
            type=MeasurementType.policy_violation if result.decision == AuditDecision.DENY else MeasurementType.conformity,
            phase=phase,
            nonce=nonce,
            value={
                "decision": result.decision.value,
                "reason": result.reason,
                "modifications": result.modifications,
                "metadata": result.metadata
            },
            timestamp=datetime.now(timezone.utc),
            auditor_signature="" # Will be replaced
        )

        # Sign with TEE hardware quote/signature
        # Use Pydantic's JSON serialization to match Verifier precisely
        m_dict = m.model_dump(mode='json', exclude={"auditor_signature"})
        blob = json.dumps(m_dict, sort_keys=True, separators=(',', ':')).encode('utf-8')
        m.auditor_signature = self.tee.get_quote(blob)

        return m.model_dump(mode='json')
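
The following is a minimal sketch of a concrete auditor built on this base class. The keyword policy, class name, and verifier URL are illustrative; only Auditor, AuditResult, and the Proceed/Deny helpers documented further down this page are assumed.

# Illustrative subclass; the blocked-term policy is hypothetical, not part of the SDK.
from typing import Any

from lucid_sdk.auditor import Auditor, AuditResult, Proceed, Deny


class KeywordAuditor(Auditor):
    BLOCKED_TERMS = {"exfiltrate", "credential dump"}  # example policy

    def check_request(self, request: Any) -> AuditResult:
        text = request.get("prompt", "") if isinstance(request, dict) else str(request)
        if any(term in text.lower() for term in self.BLOCKED_TERMS):
            result = Deny("Blocked term found in request", rule="keyword-blocklist")
        else:
            result = Proceed("No blocked terms found")
        # Create, sign, and push evidence for the 'request' phase.
        self.emit_evidence("request", result, request=request)
        return result

    def check_execution(self, context: Any) -> AuditResult:
        return Proceed("No execution-time checks implemented")

    def check_response(self, response: Any, request: Any = None) -> AuditResult:
        return Proceed("No response checks implemented")


auditor = KeywordAuditor(auditor_id="keyword-auditor", verifier_url="https://verifier.example/api")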

check_execution(context) abstractmethod

Monitor the model execution process.

Parameters:

    context (Any): Execution context (e.g., telemetry indicators). Required.

Returns:

    AuditResult containing the decision.

Source code in packages/lucid-sdk/lucid_sdk/auditor.py
@abstractmethod
def check_execution(self, context: Any) -> AuditResult:
    """Monitor the model execution process.

    Args:
        context: Execution context (e.g., telemetry indicators).

    Returns:
        AuditResult containing the decision.
    """
    pass

check_request(request) abstractmethod

Evaluate an incoming model request.

Parameters:

    request (Any): The request payload to audit. Required.

Returns:

    AuditResult containing the decision.

Source code in packages/lucid-sdk/lucid_sdk/auditor.py
@abstractmethod
def check_request(self, request: Any) -> AuditResult:
    """Evaluate an incoming model request.

    Args:
        request: The request payload to audit.

    Returns:
        AuditResult containing the decision.
    """
    pass

check_response(response, request=None) abstractmethod

Evaluate a model-generated response.

Parameters:

    response (Any): The response payload to audit. Required.
    request (Any): Optional original request for context. Default: None.

Returns:

    AuditResult containing the decision.

Source code in packages/lucid-sdk/lucid_sdk/auditor.py
@abstractmethod
def check_response(self, response: Any, request: Any = None) -> AuditResult:
    """Evaluate a model generated response.

    Args:
        response: The response payload to audit.
        request: Optional original request for context.

    Returns:
        AuditResult containing the decision.
    """
    pass

create_measurement(phase, result, nonce=None)

Create and sign a Measurement for the given audit result.

Parameters:

    phase (str): The lifecycle phase to record. Required.
    result (AuditResult): The AuditResult to transform into a Measurement. Required.
    nonce (str): Optional anti-replay nonce. Default: None.

Returns:

    Dict[str, Any]: Dictionary representation of a signed Measurement.

Source code in packages/lucid-sdk/lucid_sdk/auditor.py
def create_measurement(self, phase: str, result: AuditResult, nonce: str = None) -> Dict[str, Any]:
    """Create and sign a Measurement for the given audit result.

    Args:
        phase: The lifecycle phase to record.
        result: The AuditResult to transform into a Measurement.
        nonce: Optional anti-replay nonce.

    Returns:
        Dictionary representation of a signed Measurement.
    """

    # Use the Measurement model to ensure consistency with Verifier
    m = Measurement(
        name=self.auditor_id,
        type=MeasurementType.policy_violation if result.decision == AuditDecision.DENY else MeasurementType.conformity,
        phase=phase,
        nonce=nonce,
        value={
            "decision": result.decision.value,
            "reason": result.reason,
            "modifications": result.modifications,
            "metadata": result.metadata
        },
        timestamp=datetime.now(timezone.utc),
        auditor_signature="" # Will be replaced
    )

    # Sign with TEE hardware quote/signature
    # Use Pydantic's JSON serialization to match Verifier precisely
    m_dict = m.model_dump(mode='json', exclude={"auditor_signature"})
    blob = json.dumps(m_dict, sort_keys=True, separators=(',', ':')).encode('utf-8')
    m.auditor_signature = self.tee.get_quote(blob)

    return m.model_dump(mode='json')
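
For independent verification, the exact byte blob covered by auditor_signature can be recomputed from the returned dictionary. The sketch below mirrors the serialization above (sorted keys, compact separators, signature field excluded); the function name is illustrative.

# Recompute the canonical blob that create_measurement passes to the TEE for signing.
# `measurement` is the dict returned by create_measurement (or Measurement.model_dump(mode='json')).
import json

def canonical_blob(measurement: dict) -> bytes:
    unsigned = {k: v for k, v in measurement.items() if k != "auditor_signature"}
    return json.dumps(unsigned, sort_keys=True, separators=(',', ':')).encode('utf-8')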

emit_evidence(phase, result, request=None)

Standard method to create, sign, and send evidence to the Verifier.

This method wraps the audit result into a Measurement, calls the hardware Attestation Agent to sign it, and pushes it to the Verifier.

Parameters:

    phase (str): The lifecycle phase (artifact, request, execution, response). Required.
    result (AuditResult): The result of the audit. Required.
    request (Any): Optional request object to extract nonces/metadata. Default: None.

Source code in packages/lucid-sdk/lucid_sdk/auditor.py
def emit_evidence(self, phase: str, result: AuditResult, request: Any = None):
    """Standard method to create, sign, and send evidence to the Verifier.

    This method wraps the audit result into a Measurement, calls the 
    hardware Attestation Agent to sign it, and pushes it to the Verifier.

    Args:
        phase: The lifecycle phase (artifact, request, execution, response).
        result: The result of the audit.
        request: Optional request object to extract nonces/metadata.
    """
    import httpx

    # Get session/nonce context for freshness
    nonce = None
    if isinstance(request, dict):
        nonce = request.get("nonce")
    elif hasattr(request, "nonce"):
        nonce = getattr(request, "nonce")

    measurement = self.create_measurement(phase, result, nonce=nonce)

    if self.verifier_url:
        try:
            payload = {
                "session_id": nonce or "default-session",
                "model_id": os.getenv("MODEL_ID", "default-model"),
                "measurements": [measurement]
            }
            # Synchronous send to ensure evidence is committed during the call
            from tenacity import retry, stop_after_attempt, wait_exponential

            @retry(
                stop=stop_after_attempt(3), 
                wait=wait_exponential(multiplier=1, max=10),
                reraise=True
            )
            def _send():
                with httpx.Client() as client:
                    resp = client.post(f"{self.verifier_url}/evidence", json=payload, timeout=5.0)
                    resp.raise_for_status()

            _send()
        except Exception as e:
            logger.error("failed_to_emit_evidence", verifier_url=self.verifier_url, error=str(e))

lucid_sdk.auditor.AuditResult

The outcome of an auditor's evaluation.

Encapsulates the decision made by the auditor, along with any relevant reasons, modifications to the data, and additional metadata for the Verifier or Observer.

Attributes:

    decision (AuditDecision): The final decision (PROCEED, DENY, REDACT, WARN).
    reason (Optional[str]): Human-readable explanation for the decision.
    modifications (Optional[Dict[str, Any]]): If decision is REDACT, contains the specific key-value updates to be applied to the request.
    metadata (Dict[str, Any]): Arbitrary key-value pairs providing extra context for the audit (e.g., specific rules triggered).

Source code in packages/lucid-sdk/lucid_sdk/auditor.py
class AuditResult:
    """The outcome of an auditor's evaluation.

    Encapsulates the decision made by the auditor, along with any relevant
    reasons, modifications to the data, and additional metadata for the
    Verifier or Observer.

    Attributes:
        decision (AuditDecision): The final decision (PROCEED, DENY, REDACT, WARN).
        reason (Optional[str]): Human-readable explanation for the decision.
        modifications (Optional[Dict[str, Any]]): If decision is REDACT, contains
            the specific key-value updates to be applied to the request.
        metadata (Dict[str, Any]): Arbitrary key-value pairs providing extra
            context for the audit (e.g., specific rules triggered).
    """
    def __init__(
        self, 
        decision: AuditDecision, 
        reason: Optional[str] = None, 
        modifications: Optional[Dict[str, Any]] = None,
        metadata: Dict[str, Any] = None
    ):
        self.decision = decision
        self.reason = reason
        self.modifications = modifications
        self.metadata = metadata or {}
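
For example, a REDACT result that rewrites one field of the request can be constructed directly; the helper functions below wrap exactly this pattern. Import paths follow the module names shown on this page; the field values are illustrative.

# Constructing an AuditResult by hand; Redact() below is shorthand for the same thing.
from lucid_sdk.auditor import AuditResult
from lucid_schemas.models import AuditDecision

result = AuditResult(
    decision=AuditDecision.REDACT,
    reason="Email address removed from prompt",
    modifications={"prompt": "Contact me at [REDACTED]"},
    metadata={"rule": "pii-email"},  # illustrative rule identifier
)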

Helpers

lucid_sdk.auditor.Proceed(reason=None, **metadata)

Helper to create a PROCEED result.

Parameters:

    reason (str): Optional explanation. Default: None.
    **metadata: Extra context to include.

Source code in packages/lucid-sdk/lucid_sdk/auditor.py
def Proceed(reason: str = None, **metadata) -> AuditResult:
    """Helper to create a PROCEED result.

    Args:
        reason: Optional explanation.
        **metadata: Extra context to include.
    """
    return AuditResult(AuditDecision.PROCEED, reason=reason, metadata=metadata)

lucid_sdk.auditor.Deny(reason, **metadata)

Helper to create a DENY result.

Parameters:

    reason (str): Required explanation for the denial.
    **metadata: Extra context to include.

Source code in packages/lucid-sdk/lucid_sdk/auditor.py
def Deny(reason: str, **metadata) -> AuditResult:
    """Helper to create a DENY result.

    Args:
        reason: Required explanation for the denial.
        **metadata: Extra context to include.
    """
    return AuditResult(AuditDecision.DENY, reason=reason, metadata=metadata)

lucid_sdk.auditor.Redact(modifications, reason=None, **metadata)

Helper to create a REDACT result.

Parameters:

    modifications (Dict[str, Any]): Dictionary of keys and their new, redacted values.
    reason (str): Optional explanation. Default: None.
    **metadata: Extra context to include.

Source code in packages/lucid-sdk/lucid_sdk/auditor.py
def Redact(modifications: Dict[str, Any], reason: str = None, **metadata) -> AuditResult:
    """Helper to create a REDACT result.

    Args:
        modifications: Dictionary of keys and their new, redacted values.
        reason: Optional explanation.
        **metadata: Extra context to include.
    """
    return AuditResult(AuditDecision.REDACT, reason=reason, modifications=modifications, metadata=metadata)

lucid_sdk.auditor.Warn(reason, **metadata)

Helper to create a WARN result.

Parameters:

    reason (str): Required explanation for the warning.
    **metadata: Extra context to include.

Source code in packages/lucid-sdk/lucid_sdk/auditor.py
def Warn(reason: str, **metadata) -> AuditResult:
    """Helper to create a WARN result.

    Args:
        reason: Required explanation for the warning.
        **metadata: Extra context to include.
    """
    return AuditResult(AuditDecision.WARN, reason=reason, metadata=metadata)
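
Taken together, the four helpers cover the usual branches of a check method. The sketch below is illustrative only: the prompt-injection pattern and the PII helpers are stand-ins, not part of the SDK.

# Illustrative use of the result helpers inside request-auditing logic.
from lucid_sdk.auditor import Proceed, Deny, Redact, Warn

# Hypothetical application helpers, stubbed for the example.
def looks_like_pii(text: str) -> bool:
    return "@" in text

def scrub_pii(text: str) -> str:
    return text.replace("@", "[redacted]")

def audit_prompt(request: dict):
    prompt = request.get("prompt", "")
    if "ignore all previous instructions" in prompt.lower():
        return Deny("Prompt-injection pattern detected", pattern="instruction-override")
    if looks_like_pii(prompt):
        return Redact({"prompt": scrub_pii(prompt)}, reason="PII removed", fields=["prompt"])
    if len(prompt) > 50_000:
        return Warn("Unusually long prompt", length=len(prompt))
    return Proceed("Request passed all checks")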

Models & Schemas

lucid_schemas.models.Measurement

Bases: BaseModel

The primary evidence unit produced by an Auditor.

Source code in packages/lucid-schemas/lucid_schemas/models.py
class Measurement(BaseModel):
    """The primary evidence unit produced by an Auditor."""
    name: str = Field(
        ..., 
        description="Unique label for the metric (e.g. 'pii_score').",
        examples=["pii_score"]
    )
    type: MeasurementType = Field(
        ..., 
        description="The type/category of measurement.",
        examples=["quantity"]
    )
    phase: Optional[str] = Field(
        None,
        description="The execution phase (static, pre, runtime, post).",
        examples=["pre"]
    )
    value: Union[str, float, bool, Dict[str, Any]] = Field(
        ..., 
        description="The actual data captured.",
        examples=[0.85]
    )
    compliance_framework: Optional[ComplianceFramework] = Field(
        None, 
        description="Optional mapping to a regulatory framework.",
        examples=["gdpr"]
    )
    control_id: Optional[str] = Field(
        None, 
        description="Specific section ID in the mapped framework.",
        examples=["Article 5(1)(f)"]
    )
    timestamp: datetime = Field(
        ..., 
        description="Time of capture (UTC).",
        examples=["2025-12-30T20:00:00Z"]
    )
    nonce: Optional[str] = Field(
        None,
        description="Optional freshness nonce from the relying party."
    )
    auditor_signature: str = Field(
        ..., 
        description="Cryptographic signature of the measurement blob.",
        examples=["rsa-signature-base64"]
    )
    confidence: float = Field(
        1.0, 
        ge=0.0, 
        le=1.0, 
        description="Confidence score from 0.0 (low) to 1.0 (high).",
        examples=[0.95]
    )
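
A minimal sketch of constructing a Measurement directly. In the SDK this is normally done by Auditor.create_measurement, which also obtains auditor_signature from the TEE; the values below are illustrative, and MeasurementType.conformity is the member used in the Auditor source above.

# Hand-built Measurement; field values are illustrative.
from datetime import datetime, timezone

from lucid_schemas.models import Measurement, MeasurementType

m = Measurement(
    name="pii_check",
    type=MeasurementType.conformity,  # member used by Auditor.create_measurement above
    phase="pre",
    value={"decision": "proceed", "score": 0.12},
    timestamp=datetime.now(timezone.utc),
    nonce="client-supplied-nonce",             # optional freshness nonce
    auditor_signature="rsa-signature-base64",  # placeholder; normally a TEE quote over the blob
    confidence=0.95,
)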

lucid_schemas.models.AIPassport

Bases: AttestationResult

Alias for AttestationResult for external API compatibility.

Source code in packages/lucid-schemas/lucid_schemas/models.py
class AIPassport(AttestationResult):
    """Alias for AttestationResult for external API compatibility."""
    pass

lucid_schemas.models.AuditDecision

Bases: str, Enum

Decision an auditor can make about a request or response.

Source code in packages/lucid-schemas/lucid_schemas/models.py
class AuditDecision(str, Enum):
    """Decision an auditor can make about a request/response"""
    PROCEED = "proceed"      # Allow the request to continue
    DENY = "deny"            # Block the request entirely
    REDACT = "redact"        # Allow but modify content
    WARN = "warn"            # Allow but flag for review