Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 2 additions & 1 deletion actions/ql/lib/codeql/actions/security/ControlChecks.qll
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,8 @@ string any_category() {
[
"untrusted-checkout", "output-clobbering", "envpath-injection", "envvar-injection",
"command-injection", "argument-injection", "code-injection", "cache-poisoning",
"untrusted-checkout-toctou", "artifact-poisoning", "artifact-poisoning-toctou"
"untrusted-checkout-toctou", "artifact-poisoning", "artifact-poisoning-toctou",
"prompt-injection"
]
}

Expand Down
130 changes: 130 additions & 0 deletions actions/ql/lib/codeql/actions/security/PromptInjectionQuery.qll
Original file line number Diff line number Diff line change
@@ -0,0 +1,130 @@
/**
* Provides classes and predicates for detecting prompt injection vulnerabilities
* in GitHub Actions workflows that use AI inference actions.
*
* This library identifies:
* - CWE-1427: User-controlled data flowing into AI model prompts without sanitization
*/

private import actions
private import codeql.actions.TaintTracking
private import codeql.actions.dataflow.ExternalFlow
import codeql.actions.dataflow.FlowSources
import codeql.actions.DataFlow
import codeql.actions.security.ControlChecks

/**
 * A sink for prompt injection vulnerabilities (CWE-1427).
 *
 * Sinks are defined entirely through the Models-as-Data (MaD) extensible
 * `actionsSinkModel` predicate with kind `prompt-injection`; see
 * `ext/manual/prompt_injection_sinks.model.yml` for the curated list of
 * AI-action input parameters that are treated as prompts.
 */
class PromptInjectionSink extends DataFlow::Node {
  PromptInjectionSink() { madSink(this, "prompt-injection") }
}

/**
 * A source representing user-controlled data from the `repository_dispatch`
 * event's `client_payload`. The `client_payload` can be set by anyone with
 * write access to the repository or via the GitHub API, making it a potential
 * vector for injection attacks.
 */
class RepositoryDispatchClientPayloadSource extends RemoteFlowSource {
  string event;

  RepositoryDispatchClientPayloadSource() {
    // Only flag `client_payload` accesses in workflows actually triggered by
    // `repository_dispatch`; in any other trigger the context is empty.
    event = "repository_dispatch" and
    exists(Expression expr |
      expr = this.asExpr() and
      expr.getExpression().matches("github.event.client_payload%") and
      expr.getATriggerEvent().getName() = event
    )
  }

  override string getSourceType() { result = "client_payload" }

  override string getEventName() { result = event }
}

/**
 * Gets the relevant event for a sink in a privileged context.
 *
 * Sinks guarded by a `ControlCheck` that protects against the
 * `prompt-injection` category (e.g. permission or actor checks) are excluded.
 */
Event getRelevantEventForSink(DataFlow::Node sink) {
  exists(Expression sinkExpr | sinkExpr = sink.asExpr() |
    inPrivilegedContext(sinkExpr, result) and
    not exists(ControlCheck check | check.protects(sinkExpr, result, "prompt-injection"))
  )
}

/**
 * Gets the relevant event for a prompt injection sink.
 *
 * This includes every privileged-context event from `getRelevantEventForSink`,
 * plus `repository_dispatch` events, which are externally triggerable via the
 * GitHub API even though they are not privileged in the usual sense.
 */
Event getRelevantEventForPromptInjection(DataFlow::Node sink) {
  result = getRelevantEventForSink(sink)
  or
  // repository_dispatch: the enclosing job must actually be wired to it.
  exists(LocalJob enclosing |
    result.getName() = "repository_dispatch" and
    enclosing = sink.asExpr().getEnclosingJob() and
    result = enclosing.getATriggerEvent()
  )
}

/**
 * Holds when a critical-severity prompt injection path exists from `source`
 * to `sink`, triggered by `event`.
 *
 * Critical severity requires a privileged context (or `repository_dispatch`)
 * and that the source's event matches the sink's triggering event.
 */
predicate criticalSeverityPromptInjection(
  PromptInjectionFlow::PathNode source, PromptInjectionFlow::PathNode sink, Event event
) {
  event = getRelevantEventForPromptInjection(sink.getNode()) and
  event.getName() = source.getNode().(RemoteFlowSource).getEventName() and
  PromptInjectionFlow::flowPath(source, sink)
}

/**
 * Gets the relevant event for a sink in any externally triggerable context,
 * excluding sinks protected by control checks for the prompt-injection category.
 *
 * This is broader than `getRelevantEventForSink` — it includes non-privileged
 * events like `pull_request` where an attacker can still control event
 * properties (PR title, body, branch name) that flow into AI prompts.
 * Privileged contexts and `repository_dispatch` are excluded here because the
 * critical-severity predicate already covers them.
 */
Event getRelevantEventForMediumSeverity(DataFlow::Node sink) {
  exists(LocalJob enclosing |
    enclosing = sink.asExpr().getEnclosingJob() and
    result = enclosing.getATriggerEvent()
  |
    result.isExternallyTriggerable() and
    not result.getName() = "repository_dispatch" and
    not inPrivilegedContext(sink.asExpr(), result) and
    not exists(ControlCheck check | check.protects(sink.asExpr(), result, "prompt-injection"))
  )
}

/**
 * Holds when a medium-severity prompt injection path exists from `source`
 * to `sink`, triggered by `event`.
 *
 * Covers non-privileged but externally triggerable events (e.g. `pull_request`)
 * where an attacker can control event properties that flow into AI prompts.
 */
predicate mediumSeverityPromptInjection(
  PromptInjectionFlow::PathNode source, PromptInjectionFlow::PathNode sink, Event event
) {
  event = getRelevantEventForMediumSeverity(sink.getNode()) and
  event.getName() = source.getNode().(RemoteFlowSource).getEventName() and
  PromptInjectionFlow::flowPath(source, sink)
}

/**
 * A taint-tracking configuration for unsafe user input
 * that is used to construct AI prompts (CWE-1427).
 */
private module PromptInjectionConfig implements DataFlow::ConfigSig {
  // Any remote flow source (event properties, MaD sources, etc.) is in scope;
  // the severity predicates later filter by the source's triggering event.
  predicate isSource(DataFlow::Node source) { source instanceof RemoteFlowSource }

  // Sinks come solely from the `prompt-injection` MaD sink models.
  predicate isSink(DataFlow::Node sink) { sink instanceof PromptInjectionSink }

  // Opt in to diff-informed incremental analysis: only alerts whose selected
  // locations intersect the diff range are recomputed.
  predicate observeDiffInformedIncrementalMode() { any() }

  // The query selects both the sink and the triggering event, so both
  // locations must be declared for diff-informed mode to be sound.
  Location getASelectedSinkLocation(DataFlow::Node sink) {
    result = sink.getLocation()
    or
    result = getRelevantEventForPromptInjection(sink).getLocation()
  }
}

/** Tracks flow of unsafe user input that is used to construct AI prompts. */
module PromptInjectionFlow = TaintTracking::Global<PromptInjectionConfig>;
47 changes: 47 additions & 0 deletions actions/ql/lib/ext/manual/prompt_injection_sinks.model.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,47 @@
extensions:
  - addsTo:
      pack: codeql/actions-all
      extensible: actionsSinkModel
    # AI actions whose prompt/input parameters accept user-controllable data.
    # Row schema: [action repository, version pattern, input parameter, sink kind, provenance]
    #   - version "*" matches every ref/tag of the action
    #   - "input.<name>" refers to the `with:` key of the step
    #   - sink kind "prompt-injection" is consumed by PromptInjectionQuery.qll
    # source: https://boostsecurityio.github.io/lotp/
    # source: https://github.com/marketplace?type=actions&category=ai-assisted
    data:
      # === GitHub official AI actions ===
      - ["actions/ai-inference", "*", "input.prompt", "prompt-injection", "manual"]
      - ["actions/ai-inference", "*", "input.system-prompt", "prompt-injection", "manual"]
      - ["github/ai-moderator", "*", "input.prompt", "prompt-injection", "manual"]
      - ["github/ai-moderator", "*", "input.custom-instructions", "prompt-injection", "manual"]
      # === Anthropic ===
      - ["anthropics/claude-code-action", "*", "input.prompt", "prompt-injection", "manual"]
      - ["anthropics/claude-code-action", "*", "input.direct_prompt", "prompt-injection", "manual"]
      - ["anthropics/claude-code-action", "*", "input.custom_instructions", "prompt-injection", "manual"]
      # === Google ===
      - ["google/gemini-code-assist-action", "*", "input.prompt", "prompt-injection", "manual"]
      - ["google-gemini/code-assist-action", "*", "input.prompt", "prompt-injection", "manual"]
      # === OpenAI / GPT ===
      - ["openai/chat-completion-action", "*", "input.prompt", "prompt-injection", "manual"]
      - ["openai/chat-completion-action", "*", "input.messages", "prompt-injection", "manual"]
      - ["di-sukharev/opencommit", "*", "input.prompt", "prompt-injection", "manual"]
      # === Community AI actions (marketplace) ===
      # NOTE(review): parameter names below are taken from each action's
      # action.yml at modeling time — re-verify periodically, as community
      # actions rename inputs without notice.
      - ["quixio/quix-streams-ci-ai-review", "*", "input.prompt", "prompt-injection", "manual"]
      - ["rubberduck-ai/rubberduck-review-action", "*", "input.prompt", "prompt-injection", "manual"]
      - ["coderabbitai/ai-pr-reviewer", "*", "input.prompt", "prompt-injection", "manual"]
      - ["coderabbitai/ai-pr-reviewer", "*", "input.system_message", "prompt-injection", "manual"]
      - ["platisd/openai-pr-description", "*", "input.prompt", "prompt-injection", "manual"]
      - ["CodiumAI/pr-agent", "*", "input.prompt", "prompt-injection", "manual"]
      - ["arcee-ai/agent-action", "*", "input.prompt", "prompt-injection", "manual"]
      - ["langchain-ai/langsmith-action", "*", "input.prompt", "prompt-injection", "manual"]
      - ["abirismyname/create-discussion-with-ai", "*", "input.prompt", "prompt-injection", "manual"]
      - ["yousefed/ai-action", "*", "input.prompt", "prompt-injection", "manual"]
      - ["nickscamara/openai-github-action", "*", "input.prompt", "prompt-injection", "manual"]
      - ["austenstone/openai-completion-action", "*", "input.prompt", "prompt-injection", "manual"]
      - ["joshspicer/gpt-review", "*", "input.prompt", "prompt-injection", "manual"]
      # github org — listed here despite the "Community" banner above
      - ["github/copilot-text-inference", "*", "input.prompt", "prompt-injection", "manual"]
      # === Google (GitHub Actions org) ===
      - ["google-github-actions/run-gemini-cli", "*", "input.prompt", "prompt-injection", "manual"]
      # === Warp ===
      - ["warpdotdev/oz-agent-action", "*", "input.prompt", "prompt-injection", "manual"]
      # === Generic AI action patterns (common parameter names) ===
      - ["togethercomputer/together-action", "*", "input.prompt", "prompt-injection", "manual"]
      - ["huggingface/inference-action", "*", "input.prompt", "prompt-injection", "manual"]
      - ["replicate/action", "*", "input.prompt", "prompt-injection", "manual"]
Original file line number Diff line number Diff line change
@@ -0,0 +1,80 @@
## Overview

Passing user-controlled data into the prompt of an AI inference action allows an attacker to hijack the AI's behavior through **prompt injection**. Any workflow that feeds external input — issue titles, PR bodies, comments, or `repository_dispatch` payloads — directly into an AI prompt without sanitization is vulnerable to this class of attack.

When the AI action runs with access to secrets, write permissions, or code execution capabilities, a successful prompt injection can lead to secret exfiltration, unauthorized repository modifications, malicious package publication, or arbitrary command execution within the CI/CD environment.

## Recommendation

Never pass user-controlled data directly into AI prompt parameters. Instead:

- **Sanitize and truncate** user input before including it in prompts. Strip control characters and limit length.
- **Use environment variables** with shell-native interpolation (e.g. `$TITLE` not `${{ ... }}`) to prevent expression injection.
- **Restrict workflow permissions** to the minimum required (e.g. `issues: write`, `models: read` only).
- **Use deployment environments** with required reviewers for workflows that invoke AI actions on external input.
- **Validate AI output** before using it in subsequent steps — treat AI responses as untrusted data.

## Example

### Incorrect Usage

The following example passes unsanitized issue data directly into an AI prompt. An attacker can craft an issue title containing hidden instructions that cause the AI to ignore its system prompt, exfiltrate secrets via its response, or produce output that compromises downstream steps:

```yaml
on:
issues:
types: [opened]

jobs:
summary:
runs-on: ubuntu-latest
permissions:
issues: write
models: read
steps:
- name: Run AI inference
uses: actions/ai-inference@v1
with:
prompt: |
Summarize the following GitHub issue:
Title: ${{ github.event.issue.title }}
Body: ${{ github.event.issue.body }}
```

### Correct Usage

The following example sanitizes and truncates user input before passing it to the AI, and uses environment variables to prevent expression injection:

```yaml
on:
issues:
types: [opened]

jobs:
summary:
runs-on: ubuntu-latest
permissions:
issues: write
models: read
steps:
- name: Sanitize input
id: sanitize
run: |
SAFE_TITLE=$(echo "$TITLE" | head -c 200 | tr -dc '[:print:]')
echo "title=$SAFE_TITLE" >> $GITHUB_OUTPUT
env:
TITLE: ${{ github.event.issue.title }}

- name: Run AI inference
uses: actions/ai-inference@v1
with:
prompt: |
Summarize the following GitHub issue title (user input has been sanitized):
Title: ${{ steps.sanitize.outputs.title }}
```

## References

- Common Weakness Enumeration: [CWE-1427](https://cwe.mitre.org/data/definitions/1427.html).
- [OWASP LLM01: Prompt Injection](https://genai.owasp.org/llmrisk/llm01-prompt-injection/).
- GitHub Docs: [Security hardening for GitHub Actions](https://docs.github.com/en/actions/security-guides/security-hardening-for-github-actions).
Original file line number Diff line number Diff line change
@@ -0,0 +1,24 @@
/**
 * @name Prompt injection from user-controlled Actions input
 * @description User-controlled data flowing into AI prompts in a privileged context
 *              may allow attackers to manipulate AI behavior through prompt injection.
 * @kind path-problem
 * @problem.severity error
 * @security-severity 9.0
 * @precision high
 * @id actions/prompt-injection/critical
 * @tags actions
 *       security
 *       experimental
 *       external/cwe/cwe-1427
 */

import actions
import codeql.actions.security.PromptInjectionQuery
import PromptInjectionFlow::PathGraph

from PromptInjectionFlow::PathNode source, PromptInjectionFlow::PathNode sink, Event event
where criticalSeverityPromptInjection(source, sink, event)
// The two $@ placeholders are bound positionally: the first to (sink, raw
// expression text), the second to (event, event name).
select sink.getNode(), source, sink,
  "Potential prompt injection in $@, which may be controlled by an external user ($@).", sink,
  sink.getNode().asExpr().(Expression).getRawExpression(), event, event.getName()
Original file line number Diff line number Diff line change
@@ -0,0 +1,72 @@
## Overview

Passing user-controlled data into the prompt of an AI inference action on non-privileged but externally triggerable events such as `pull_request` allows an attacker to manipulate AI behavior through **prompt injection**. While the `pull_request` event does not grant write access to the base repository by default, the AI action may still reveal sensitive information, produce misleading output, or influence downstream processes that trust the AI's response.

This is a lower-severity variant of prompt injection (compared to privileged contexts like `issues`, `issue_comment`, or `pull_request_target`) because the attacker's ability to exploit the injection is limited by the reduced permissions of the triggering event.

## Recommendation

Apply the same mitigations as for critical-severity prompt injection:

- **Sanitize and truncate** user input before including it in prompts.
- **Use environment variables** with shell-native interpolation instead of `${{ }}` expression syntax.
- **Restrict workflow permissions** to the minimum required.
- **Validate AI output** before using it in subsequent steps.

## Example

### Incorrect Usage

The following example passes the pull request title directly into an AI prompt on the `pull_request` event:

```yaml
on:
pull_request:
types: [opened]

jobs:
analyze:
runs-on: ubuntu-latest
steps:
- name: AI analysis
uses: actions/ai-inference@v1
with:
prompt: |
Analyze this PR title:
${{ github.event.pull_request.title }}
```

### Correct Usage

The following example sanitizes the PR title before passing it to the AI:

```yaml
on:
pull_request:
types: [opened]

jobs:
analyze:
runs-on: ubuntu-latest
steps:
- name: Sanitize input
id: sanitize
run: |
SAFE_TITLE=$(echo "$TITLE" | head -c 200 | tr -dc '[:print:]')
echo "title=$SAFE_TITLE" >> $GITHUB_OUTPUT
env:
TITLE: ${{ github.event.pull_request.title }}

- name: AI analysis
uses: actions/ai-inference@v1
with:
prompt: |
Analyze this PR title (sanitized):
${{ steps.sanitize.outputs.title }}
```

## References

- Common Weakness Enumeration: [CWE-1427](https://cwe.mitre.org/data/definitions/1427.html).
- [OWASP LLM01: Prompt Injection](https://genai.owasp.org/llmrisk/llm01-prompt-injection/).
- GitHub Docs: [Security hardening for GitHub Actions](https://docs.github.com/en/actions/security-guides/security-hardening-for-github-actions).
Original file line number Diff line number Diff line change
@@ -0,0 +1,25 @@
/**
* @name Prompt injection from user-controlled Actions input (medium severity)
* @description User-controlled data flowing into AI prompts on non-privileged
* but externally triggerable events (e.g. pull_request) may allow
* attackers to manipulate AI behavior through prompt injection.
* @kind path-problem
* @problem.severity warning
* @security-severity 5.0
* @precision medium
* @id actions/prompt-injection/medium
* @tags actions
* security
* experimental
* external/cwe/cwe-1427
*/

import actions
import codeql.actions.security.PromptInjectionQuery
import PromptInjectionFlow::PathGraph

from PromptInjectionFlow::PathNode source, PromptInjectionFlow::PathNode sink, Event event
where mediumSeverityPromptInjection(source, sink, event)
select sink.getNode(), source, sink,
"Potential prompt injection in $@, which may be controlled by an external user ($@).", sink,
sink.getNode().asExpr().(Expression).getRawExpression(), event, event.getName()
Loading
Loading