Compare commits

..

2 Commits

Author SHA1 Message Date
codex-action
6251c5e1fd Fix CI failures for PR #574 2026-01-20 22:31:08 +00:00
Carlos Polop
92d9f27e8b Test CI failure flow 2026-01-20 23:24:03 +01:00
27 changed files with 298 additions and 703 deletions

View File

@@ -26,7 +26,7 @@ jobs:
steps: steps:
# checkout # checkout
- name: Checkout - name: Checkout
uses: actions/checkout@v5 uses: actions/checkout@master
with: with:
ref: ${{ github.head_ref }} ref: ${{ github.head_ref }}
@@ -36,11 +36,11 @@ jobs:
# Add MSBuild to the PATH: https://github.com/microsoft/setup-msbuild # Add MSBuild to the PATH: https://github.com/microsoft/setup-msbuild
- name: Setup MSBuild.exe - name: Setup MSBuild.exe
uses: microsoft/setup-msbuild@v2 uses: microsoft/setup-msbuild@v1.0.2
# Setup NuGet # Setup NuGet
- name: Setup NuGet.exe - name: Setup NuGet.exe
uses: nuget/setup-nuget@v2 uses: nuget/setup-nuget@v1
# Restore the packages for testing # Restore the packages for testing
- name: Restore the application - name: Restore the application
@@ -48,23 +48,23 @@ jobs:
# build # build
- name: run MSBuild - name: run MSBuild
run: msbuild $env:Solution_Path /p:Configuration=$env:Configuration /p:UseSharedCompilation=false run: msbuild $env:Solution_Path
# Execute all unit tests in the solution # Execute all unit tests in the solution
- name: Execute unit tests #- name: Execute unit tests
run: dotnet test $env:Solution_Path --configuration $env:Configuration # run: dotnet test $env:Solution_Path
# Build & update all versions # Build & update all versions
- name: Build all versions - name: Build all versions
run: | run: |
echo "build x64" echo "build x64"
msbuild -m $env:Solution_Path /t:Rebuild /p:Configuration=$env:Configuration /p:Platform="x64" /p:UseSharedCompilation=false msbuild -m $env:Solution_Path /t:Rebuild /p:Configuration=$env:Configuration /p:Platform="x64"
echo "build x86" echo "build x86"
msbuild -m $env:Solution_Path /t:Rebuild /p:Configuration=$env:Configuration /p:Platform="x86" /p:UseSharedCompilation=false msbuild -m $env:Solution_Path /t:Rebuild /p:Configuration=$env:Configuration /p:Platform="x86"
echo "build Any CPU" echo "build Any CPU"
msbuild -m $env:Solution_Path /t:Rebuild /p:Configuration=$env:Configuration /p:Platform="Any CPU" /p:UseSharedCompilation=false msbuild -m $env:Solution_Path /t:Rebuild /p:Configuration=$env:Configuration /p:Platform="Any CPU"
- name: Execute winPEAS -h - name: Execute winPEAS -h
shell: pwsh shell: pwsh
@@ -220,7 +220,6 @@ jobs:
- uses: actions/setup-go@v6 - uses: actions/setup-go@v6
with: with:
go-version: '1.23' go-version: '1.23'
cache: false
- run: go version - run: go version
# Build linpeas # Build linpeas
@@ -231,9 +230,6 @@ jobs:
python3 -m builder.linpeas_builder --all --output linpeas_fat.sh python3 -m builder.linpeas_builder --all --output linpeas_fat.sh
python3 -m builder.linpeas_builder --all-no-fat --output linpeas.sh python3 -m builder.linpeas_builder --all-no-fat --output linpeas.sh
python3 -m builder.linpeas_builder --small --output linpeas_small.sh python3 -m builder.linpeas_builder --small --output linpeas_small.sh
- name: Run linPEAS builder tests
run: python3 -m unittest discover -s linPEAS/tests -p "test_*.py"
# Build linpeas binaries # Build linpeas binaries
- name: Build linpeas binaries - name: Build linpeas binaries
@@ -366,7 +362,7 @@ jobs:
steps: steps:
# Download repo # Download repo
- uses: actions/checkout@v5 - uses: actions/checkout@v2
# Build linpeas # Build linpeas
- name: Build macpeas - name: Build macpeas
@@ -473,11 +469,11 @@ jobs:
- name: Get current date - name: Get current date
id: date id: date
run: echo "date=$(date +'%Y%m%d')" >> "$GITHUB_OUTPUT" run: echo "::set-output name=date::$(date +'%Y%m%d')"
- name: Generate random - name: Generate random
id: random_n id: random_n
run: echo "some_rand=$(openssl rand -hex 4)" >> "$GITHUB_OUTPUT" run: echo "::set-output name=some_rand::$(openssl rand -hex 4)"
# Create the release # Create the release
- name: Create Release - name: Create Release

View File

@@ -8,8 +8,6 @@ on:
paths-ignore: paths-ignore:
- '.github/**' - '.github/**'
workflow_dispatch:
jobs: jobs:
Build_and_test_winpeas_pr: Build_and_test_winpeas_pr:
runs-on: windows-latest runs-on: windows-latest
@@ -22,7 +20,7 @@ jobs:
steps: steps:
# checkout # checkout
- name: Checkout - name: Checkout
uses: actions/checkout@v5 uses: actions/checkout@master
with: with:
ref: ${{ github.head_ref }} ref: ${{ github.head_ref }}
@@ -32,11 +30,11 @@ jobs:
# Add MSBuild to the PATH # Add MSBuild to the PATH
- name: Setup MSBuild.exe - name: Setup MSBuild.exe
uses: microsoft/setup-msbuild@v2 uses: microsoft/setup-msbuild@v1.0.2
# Setup NuGet # Setup NuGet
- name: Setup NuGet.exe - name: Setup NuGet.exe
uses: nuget/setup-nuget@v2 uses: nuget/setup-nuget@v1
# Restore the packages for testing # Restore the packages for testing
- name: Restore the application - name: Restore the application
@@ -44,23 +42,19 @@ jobs:
# build # build
- name: run MSBuild - name: run MSBuild
run: msbuild $env:Solution_Path /p:Configuration=$env:Configuration /p:UseSharedCompilation=false run: msbuild $env:Solution_Path
# Execute unit tests in the solution
- name: Execute unit tests
run: dotnet test $env:Solution_Path --configuration $env:Configuration
# Build all versions # Build all versions
- name: Build all versions - name: Build all versions
run: | run: |
echo "build x64" echo "build x64"
msbuild -m $env:Solution_Path /t:Rebuild /p:Configuration=$env:Configuration /p:Platform="x64" /p:UseSharedCompilation=false msbuild -m $env:Solution_Path /t:Rebuild /p:Configuration=$env:Configuration /p:Platform="x64"
echo "build x86" echo "build x86"
msbuild -m $env:Solution_Path /t:Rebuild /p:Configuration=$env:Configuration /p:Platform="x86" /p:UseSharedCompilation=false msbuild -m $env:Solution_Path /t:Rebuild /p:Configuration=$env:Configuration /p:Platform="x86"
echo "build Any CPU" echo "build Any CPU"
msbuild -m $env:Solution_Path /t:Rebuild /p:Configuration=$env:Configuration /p:Platform="Any CPU" /p:UseSharedCompilation=false msbuild -m $env:Solution_Path /t:Rebuild /p:Configuration=$env:Configuration /p:Platform="Any CPU"
- name: Execute winPEAS -h - name: Execute winPEAS -h
shell: pwsh shell: pwsh
@@ -111,7 +105,7 @@ jobs:
steps: steps:
# Download repo # Download repo
- uses: actions/checkout@v5 - uses: actions/checkout@v2
with: with:
ref: ${{ github.head_ref }} ref: ${{ github.head_ref }}
@@ -119,7 +113,6 @@ jobs:
- uses: actions/setup-go@v6 - uses: actions/setup-go@v6
with: with:
go-version: '1.23' go-version: '1.23'
cache: false
- run: go version - run: go version
# Build linpeas # Build linpeas
@@ -130,9 +123,6 @@ jobs:
python3 -m builder.linpeas_builder --all --output linpeas_fat.sh python3 -m builder.linpeas_builder --all --output linpeas_fat.sh
python3 -m builder.linpeas_builder --all-no-fat --output linpeas.sh python3 -m builder.linpeas_builder --all-no-fat --output linpeas.sh
python3 -m builder.linpeas_builder --small --output linpeas_small.sh python3 -m builder.linpeas_builder --small --output linpeas_small.sh
- name: Run linPEAS builder tests
run: python3 -m unittest discover -s linPEAS/tests -p "test_*.py"
# Run linpeas help as quick test # Run linpeas help as quick test
- name: Run linpeas help - name: Run linpeas help
@@ -171,7 +161,7 @@ jobs:
steps: steps:
# Download repo # Download repo
- uses: actions/checkout@v5 - uses: actions/checkout@v2
with: with:
ref: ${{ github.head_ref }} ref: ${{ github.head_ref }}

View File

@@ -1,195 +0,0 @@
name: CI-master Failure Chack-Agent PR
on:
workflow_run:
workflows: ["CI-master_test"]
types: [completed]
jobs:
chack_agent_fix_master_failure:
if: >
${{ github.event.workflow_run.conclusion == 'failure' &&
github.event.workflow_run.head_branch == 'master' &&
!startsWith(github.event.workflow_run.head_commit.message, 'Fix CI-master failures for run #') }}
runs-on: ubuntu-latest
permissions:
contents: write
pull-requests: write
issues: write
actions: read
env:
TARGET_BRANCH: master
FIX_BRANCH: chack-agent/ci-master-fix-${{ github.event.workflow_run.id }}
CHACK_LOGS_HTTP_URL: ${{ secrets.CHACK_LOGS_HTTP_URL }}
steps:
- name: Checkout failing commit
uses: actions/checkout@v5
with:
ref: ${{ github.event.workflow_run.head_sha }}
fetch-depth: 0
persist-credentials: true
token: ${{ secrets.CHACK_AGENT_FIXER_TOKEN || github.token }}
- name: Configure git author
run: |
git config user.name "chack-agent"
git config user.email "chack-agent@users.noreply.github.com"
- name: Create fix branch
run: git checkout -b "$FIX_BRANCH"
- name: Fetch failure summary and failed-step logs
env:
GH_TOKEN: ${{ github.token }}
RUN_ID: ${{ github.event.workflow_run.id }}
run: |
failed_logs_file="$(pwd)/chack_failed_steps_logs.txt"
if gh run view "$RUN_ID" --repo "${{ github.repository }}" --log-failed > "$failed_logs_file"; then
if [ ! -s "$failed_logs_file" ]; then
echo "No failed step logs were returned by gh run view --log-failed." > "$failed_logs_file"
fi
else
echo "Failed to download failed step logs with gh run view --log-failed." > "$failed_logs_file"
fi
echo "FAILED_LOGS_PATH=$failed_logs_file" >> "$GITHUB_ENV"
gh api -H "Accept: application/vnd.github+json" \
/repos/${{ github.repository }}/actions/runs/$RUN_ID/jobs \
--paginate > /tmp/jobs.json
python3 - <<'PY'
import json
data = json.load(open('/tmp/jobs.json'))
lines = []
for job in data.get('jobs', []):
if job.get('conclusion') == 'failure':
lines.append(f"Job: {job.get('name')} (id {job.get('id')})")
lines.append(f"URL: {job.get('html_url')}")
for step in job.get('steps', []):
if step.get('conclusion') == 'failure':
lines.append(f" Step: {step.get('name')}")
lines.append("")
summary = "\n".join(lines).strip() or "No failing job details found."
with open('chack_failure_summary.txt', 'w') as handle:
handle.write(summary)
PY
- name: Create Chack Agent prompt
env:
RUN_URL: ${{ github.event.workflow_run.html_url }}
HEAD_SHA: ${{ github.event.workflow_run.head_sha }}
run: |
{
echo "You are fixing a failing CI-master_test run in ${{ github.repository }}."
echo "The failing workflow run is: ${RUN_URL}"
echo "The failing commit SHA is: ${HEAD_SHA}"
echo "The target branch for the final PR is: ${TARGET_BRANCH}"
echo ""
echo "Failure summary:"
cat chack_failure_summary.txt
echo ""
echo "Failed-step logs file absolute path (local runner): ${FAILED_LOGS_PATH}"
echo "Read that file to inspect the exact failing logs."
echo ""
echo "Please identify the cause, apply an easy, simple and minimal fix, and update files accordingly."
echo "Run any fast checks you can locally (no network)."
echo "Leave the repo in a state ready to commit; changes will be committed and pushed automatically."
} > chack_prompt.txt
- name: Set up Node.js for Codex
uses: actions/setup-node@v5
with:
node-version: "20"
- name: Install Codex CLI
run: |
npm install -g @openai/codex
codex --version
- name: Run Chack Agent
id: run_chack
uses: carlospolop/chack-agent@master
with:
provider: codex
model_primary: CHEAP_BUT_QUALITY
main_action: peass-ng
sub_action: CI-master Failure Chack-Agent PR
system_prompt: |
Diagnose the failing gh actions workflow, propose the minimal and effective safe fix, and implement it.
Run only fast, local checks (no network). Leave the repo ready to commit.
prompt_file: chack_prompt.txt
tools_config_json: "{\"exec_enabled\": true}"
session_config_json: "{\"long_term_memory_enabled\": false}"
agent_config_json: "{\"self_critique_enabled\": false, \"require_task_list_init_first\": true}"
openai_api_key: ${{ secrets.OPENAI_API_KEY }}
- name: Commit and push fix branch if changed
id: push_fix
run: |
if git diff --quiet; then
echo "No changes to commit."
echo "pushed=false" >> "$GITHUB_OUTPUT"
exit 0
fi
rm -f chack_failure_summary.txt chack_prompt.txt chack_failed_steps_logs.txt
git add -A
# Avoid workflow-file pushes with token scopes that cannot write workflows.
git reset -- .github/workflows || true
git checkout -- .github/workflows || true
git clean -fdx -- .github/workflows || true
git reset -- chack_failure_summary.txt chack_prompt.txt chack_failed_steps_logs.txt
if git diff --cached --name-only | grep -q '^.github/workflows/'; then
echo "Workflow-file changes are still staged; skipping push without workflows permission."
echo "pushed=false" >> "$GITHUB_OUTPUT"
exit 0
fi
if git diff --cached --quiet; then
echo "No committable changes left after filtering."
echo "pushed=false" >> "$GITHUB_OUTPUT"
exit 0
fi
git commit -m "Fix CI-master failures for run #${{ github.event.workflow_run.id }}"
if ! git push origin HEAD:"$FIX_BRANCH"; then
echo "Push failed (likely token workflow permission limits); skipping PR creation."
echo "pushed=false" >> "$GITHUB_OUTPUT"
exit 0
fi
echo "pushed=true" >> "$GITHUB_OUTPUT"
- name: Create PR to master
if: ${{ steps.push_fix.outputs.pushed == 'true' }}
id: create_pr
env:
GH_TOKEN: ${{ secrets.CHACK_AGENT_FIXER_TOKEN || github.token }}
RUN_URL: ${{ github.event.workflow_run.html_url }}
run: |
pr_url=$(gh pr create \
--title "Fix CI-master_test failure (run #${{ github.event.workflow_run.id }})" \
--body "Automated Chack Agent fix for failing CI-master_test run: ${RUN_URL}" \
--base "$TARGET_BRANCH" \
--head "$FIX_BRANCH")
echo "url=$pr_url" >> "$GITHUB_OUTPUT"
- name: Comment on created PR with Chack Agent result
if: ${{ steps.push_fix.outputs.pushed == 'true' && steps.run_chack.outputs.final-message != '' }}
uses: actions/github-script@v7
env:
PR_URL: ${{ steps.create_pr.outputs.url }}
CHACK_MESSAGE: ${{ steps.run_chack.outputs.final-message }}
with:
github-token: ${{ github.token }}
script: |
const prUrl = process.env.PR_URL;
const match = prUrl.match(/\/pull\/(\d+)$/);
if (!match) {
core.info(`Could not parse PR number from URL: ${prUrl}`);
return;
}
await github.rest.issues.createComment({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: Number(match[1]),
body: process.env.CHACK_MESSAGE,
});

View File

@@ -1,4 +1,4 @@
name: Chack-Agent PR Triage name: Codex PR Triage
on: on:
workflow_run: workflow_run:
@@ -6,14 +6,12 @@ on:
types: [completed] types: [completed]
jobs: jobs:
chack_agent_triage: codex_triage:
if: ${{ github.event.workflow_run.conclusion == 'success' }} if: ${{ github.event.workflow_run.conclusion == 'success' }}
runs-on: ubuntu-latest runs-on: ubuntu-latest
permissions: permissions:
contents: write contents: write
pull-requests: write pull-requests: write
env:
CHACK_LOGS_HTTP_URL: ${{ secrets.CHACK_LOGS_HTTP_URL }}
outputs: outputs:
should_run: ${{ steps.gate.outputs.should_run }} should_run: ${{ steps.gate.outputs.should_run }}
pr_number: ${{ steps.gate.outputs.pr_number }} pr_number: ${{ steps.gate.outputs.pr_number }}
@@ -30,7 +28,6 @@ jobs:
- name: Resolve PR context - name: Resolve PR context
id: gate id: gate
env: env:
GH_REPO: ${{ github.repository }}
GH_TOKEN: ${{ github.token }} GH_TOKEN: ${{ github.token }}
run: | run: |
pr_number="${{ github.event.workflow_run.pull_requests[0].number }}" pr_number="${{ github.event.workflow_run.pull_requests[0].number }}"
@@ -82,37 +79,15 @@ jobs:
${{ steps.gate.outputs.base_ref }} \ ${{ steps.gate.outputs.base_ref }} \
+refs/pull/${{ steps.gate.outputs.pr_number }}/head +refs/pull/${{ steps.gate.outputs.pr_number }}/head
- name: Set up Node.js for Codex - name: Run Codex
id: run_codex
if: ${{ steps.gate.outputs.should_run == 'true' }} if: ${{ steps.gate.outputs.should_run == 'true' }}
uses: actions/setup-node@v5 uses: openai/codex-action@v1
with: with:
node-version: "20" openai-api-key: ${{ secrets.OPENAI_API_KEY }}
output-schema-file: .github/codex/pr-merge-schema.json
- name: Install Codex CLI model: gpt-5.2-codex
if: ${{ steps.gate.outputs.should_run == 'true' }} prompt: |
run: |
npm install -g @openai/codex
codex --version
- name: Run Chack Agent
id: run_chack
if: ${{ steps.gate.outputs.should_run == 'true' }}
uses: carlospolop/chack-agent@master
with:
provider: codex
model_primary: CHEAP_BUT_QUALITY
main_action: peass-ng
sub_action: Chack-Agent PR Triage
system_prompt: |
You are Chack Agent, an elite PR reviewer for PEASS-ng.
Be conservative: merge only if changes are simple, safe, and valuable according to the user's given guidelines.
If in doubt, comment with clear questions or concerns.
Remember that you are an autonomous agent, use the exec tool to run the needed commands to list, read, analyze, modify, test...
tools_config_json: "{\"exec_enabled\": true}"
session_config_json: "{\"long_term_memory_enabled\": false}"
agent_config_json: "{\"self_critique_enabled\": false, \"require_task_list_init_first\": true}"
output_schema_file: .github/chack-agent/pr-merge-schema.json
user_prompt: |
You are reviewing PR #${{ steps.gate.outputs.pr_number }} for ${{ github.repository }}. You are reviewing PR #${{ steps.gate.outputs.pr_number }} for ${{ github.repository }}.
Decide whether to merge or comment. Merge only if all of the following are true: Decide whether to merge or comment. Merge only if all of the following are true:
@@ -132,32 +107,21 @@ jobs:
Review ONLY the changes introduced by the PR: Review ONLY the changes introduced by the PR:
git log --oneline ${{ steps.gate.outputs.base_sha }}...${{ steps.gate.outputs.head_sha }} git log --oneline ${{ steps.gate.outputs.base_sha }}...${{ steps.gate.outputs.head_sha }}
Output JSON only, following the provided schema: Output JSON only, following the provided schema.
.github/chack-agent/pr-merge-schema.json
openai_api_key: ${{ secrets.OPENAI_API_KEY }}
- name: Parse Chack Agent decision - name: Parse Codex decision
id: parse id: parse
if: ${{ steps.gate.outputs.should_run == 'true' }} if: ${{ steps.gate.outputs.should_run == 'true' }}
env: env:
CHACK_MESSAGE: ${{ steps.run_chack.outputs.final-message }} CODEX_MESSAGE: ${{ steps.run_codex.outputs.final-message }}
run: | run: |
python3 - <<'PY' python3 - <<'PY'
import json import json
import os import os
raw = (os.environ.get('CHACK_MESSAGE', '') or '').strip() data = json.loads(os.environ.get('CODEX_MESSAGE', '') or '{}')
decision = 'comment' decision = data.get('decision', 'comment')
message = 'Chack Agent did not provide details.' message = data.get('message', '').strip() or 'Codex did not provide details.'
try:
data = json.loads(raw or '{}')
if isinstance(data, dict):
decision = data.get('decision', 'comment')
message = data.get('message', '').strip() or message
else:
message = raw or message
except Exception:
message = raw or message
with open(os.environ['GITHUB_OUTPUT'], 'a') as handle: with open(os.environ['GITHUB_OUTPUT'], 'a') as handle:
handle.write(f"decision={decision}\n") handle.write(f"decision={decision}\n")
handle.write("message<<EOF\n") handle.write("message<<EOF\n")
@@ -167,31 +131,31 @@ jobs:
merge_or_comment: merge_or_comment:
runs-on: ubuntu-latest runs-on: ubuntu-latest
needs: chack_agent_triage needs: codex_triage
if: ${{ github.event.workflow_run.conclusion == 'success' && needs.chack_agent_triage.outputs.should_run == 'true' && needs.chack_agent_triage.outputs.decision != '' }} if: ${{ github.event.workflow_run.conclusion == 'success' && needs.codex_triage.outputs.should_run == 'true' && needs.codex_triage.outputs.decision != '' }}
permissions: permissions:
contents: write contents: write
pull-requests: write pull-requests: write
steps: steps:
- name: Merge PR when approved - name: Merge PR when approved
if: ${{ needs.chack_agent_triage.outputs.decision == 'merge' }} if: ${{ needs.codex_triage.outputs.decision == 'merge' }}
env: env:
GH_TOKEN: ${{ github.token }} GH_TOKEN: ${{ github.token }}
PR_NUMBER: ${{ needs.chack_agent_triage.outputs.pr_number }} PR_NUMBER: ${{ needs.codex_triage.outputs.pr_number }}
run: | run: |
gh api \ gh api \
-X PUT \ -X PUT \
-H "Accept: application/vnd.github+json" \ -H "Accept: application/vnd.github+json" \
/repos/${{ github.repository }}/pulls/${PR_NUMBER}/merge \ /repos/${{ github.repository }}/pulls/${PR_NUMBER}/merge \
-f merge_method=squash \ -f merge_method=squash \
-f commit_title="Auto-merge PR #${PR_NUMBER} (Chack Agent)" -f commit_title="Auto-merge PR #${PR_NUMBER} (Codex)"
- name: Comment with doubts - name: Comment with doubts
if: ${{ needs.chack_agent_triage.outputs.decision == 'comment' }} if: ${{ needs.codex_triage.outputs.decision == 'comment' }}
uses: actions/github-script@v7 uses: actions/github-script@v7
env: env:
PR_NUMBER: ${{ needs.chack_agent_triage.outputs.pr_number }} PR_NUMBER: ${{ needs.codex_triage.outputs.pr_number }}
CHACK_MESSAGE: ${{ needs.chack_agent_triage.outputs.message }} CODEX_MESSAGE: ${{ needs.codex_triage.outputs.message }}
with: with:
github-token: ${{ github.token }} github-token: ${{ github.token }}
script: | script: |
@@ -199,5 +163,5 @@ jobs:
owner: context.repo.owner, owner: context.repo.owner,
repo: context.repo.repo, repo: context.repo.repo,
issue_number: Number(process.env.PR_NUMBER), issue_number: Number(process.env.PR_NUMBER),
body: process.env.CHACK_MESSAGE, body: process.env.CODEX_MESSAGE,
}); });

View File

@@ -1,4 +1,4 @@
name: PR Failure Chack-Agent Dispatch name: PR Failure Codex Dispatch
on: on:
workflow_run: workflow_run:
@@ -6,7 +6,7 @@ on:
types: [completed] types: [completed]
jobs: jobs:
resolve_pr_context: codex_on_failure:
if: > if: >
${{ github.event.workflow_run.conclusion == 'failure' && ${{ github.event.workflow_run.conclusion == 'failure' &&
github.event.workflow_run.pull_requests && github.event.workflow_run.pull_requests &&
@@ -14,14 +14,11 @@ jobs:
!startsWith(github.event.workflow_run.head_commit.message, 'Fix CI failures for PR #') }} !startsWith(github.event.workflow_run.head_commit.message, 'Fix CI failures for PR #') }}
runs-on: ubuntu-latest runs-on: ubuntu-latest
permissions: permissions:
pull-requests: read contents: write
issues: read pull-requests: write
outputs: issues: write
number: ${{ steps.pr_context.outputs.number }} actions: read
author: ${{ steps.pr_context.outputs.author }}
head_repo: ${{ steps.pr_context.outputs.head_repo }}
head_branch: ${{ steps.pr_context.outputs.head_branch }}
should_run: ${{ steps.pr_context.outputs.should_run }}
steps: steps:
- name: Resolve PR context - name: Resolve PR context
id: pr_context id: pr_context
@@ -38,46 +35,25 @@ jobs:
pr_head_branch=$(gh api -H "Accept: application/vnd.github+json" \ pr_head_branch=$(gh api -H "Accept: application/vnd.github+json" \
/repos/${{ github.repository }}/pulls/${PR_NUMBER} \ /repos/${{ github.repository }}/pulls/${PR_NUMBER} \
--jq '.head.ref') --jq '.head.ref')
pr_labels=$(gh api -H "Accept: application/vnd.github+json" \
/repos/${{ github.repository }}/issues/${PR_NUMBER} \
--jq '.labels[].name')
if echo "$pr_labels" | grep -q "^chack-agent-fix-attempted$"; then
echo "chack-agent fix already attempted for PR #${PR_NUMBER}; skipping."
should_run=false
else
should_run=true
fi
{ {
echo "number=${PR_NUMBER}" echo "number=${PR_NUMBER}"
echo "author=${pr_author}" echo "author=${pr_author}"
echo "head_repo=${pr_head_repo}" echo "head_repo=${pr_head_repo}"
echo "head_branch=${pr_head_branch}" echo "head_branch=${pr_head_branch}"
echo "should_run=${should_run}"
} >> "$GITHUB_OUTPUT" } >> "$GITHUB_OUTPUT"
chack_agent_on_failure:
needs: resolve_pr_context
if: ${{ needs.resolve_pr_context.outputs.author == 'carlospolop' && needs.resolve_pr_context.outputs.should_run == 'true' }}
runs-on: ubuntu-latest
permissions:
contents: write
pull-requests: write
issues: write
actions: read
env:
CHACK_LOGS_HTTP_URL: ${{ secrets.CHACK_LOGS_HTTP_URL }}
steps:
- name: Comment on PR with failure info - name: Comment on PR with failure info
if: ${{ steps.pr_context.outputs.author == 'carlospolop' }}
uses: actions/github-script@v7 uses: actions/github-script@v7
env: env:
PR_NUMBER: ${{ needs.resolve_pr_context.outputs.number }} PR_NUMBER: ${{ steps.pr_context.outputs.number }}
RUN_URL: ${{ github.event.workflow_run.html_url }} RUN_URL: ${{ github.event.workflow_run.html_url }}
WORKFLOW_NAME: ${{ github.event.workflow_run.name }} WORKFLOW_NAME: ${{ github.event.workflow_run.name }}
with: with:
github-token: ${{ github.token }} github-token: ${{ github.token }}
script: | script: |
const prNumber = Number(process.env.PR_NUMBER); const prNumber = Number(process.env.PR_NUMBER);
const body = `PR #${prNumber} had a failing workflow "${process.env.WORKFLOW_NAME}".\n\nRun: ${process.env.RUN_URL}\n\nLaunching Chack Agent to attempt a fix.`; const body = `PR #${prNumber} had a failing workflow "${process.env.WORKFLOW_NAME}".\n\nRun: ${process.env.RUN_URL}\n\nLaunching Codex to attempt a fix.`;
await github.rest.issues.createComment({ await github.rest.issues.createComment({
owner: context.repo.owner, owner: context.repo.owner,
repo: context.repo.repo, repo: context.repo.repo,
@@ -85,30 +61,23 @@ jobs:
body, body,
}); });
- name: Mark fix attempt
env:
PR_NUMBER: ${{ needs.resolve_pr_context.outputs.number }}
GH_TOKEN: ${{ github.token }}
run: |
gh api -X POST -H "Accept: application/vnd.github+json" \
/repos/${{ github.repository }}/issues/${PR_NUMBER}/labels \
-f labels[]=chack-agent-fix-attempted
- name: Checkout PR head - name: Checkout PR head
if: ${{ steps.pr_context.outputs.author == 'carlospolop' }}
uses: actions/checkout@v5 uses: actions/checkout@v5
with: with:
repository: ${{ needs.resolve_pr_context.outputs.head_repo }} repository: ${{ steps.pr_context.outputs.head_repo }}
ref: ${{ github.event.workflow_run.head_sha }} ref: ${{ github.event.workflow_run.head_sha }}
fetch-depth: 0 fetch-depth: 0
persist-credentials: true persist-credentials: true
token: ${{ secrets.CHACK_AGENT_FIXER_TOKEN || github.token }}
- name: Configure git author - name: Configure git author
if: ${{ steps.pr_context.outputs.author == 'carlospolop' }}
run: | run: |
git config user.name "chack-agent" git config user.name "codex-action"
git config user.email "chack-agent@users.noreply.github.com" git config user.email "codex-action@users.noreply.github.com"
- name: Fetch failure summary - name: Fetch failure summary
if: ${{ steps.pr_context.outputs.author == 'carlospolop' }}
env: env:
GH_TOKEN: ${{ github.token }} GH_TOKEN: ${{ github.token }}
RUN_ID: ${{ github.event.workflow_run.id }} RUN_ID: ${{ github.event.workflow_run.id }}
@@ -131,15 +100,16 @@ jobs:
lines.append("") lines.append("")
summary = "\n".join(lines).strip() or "No failing job details found." summary = "\n".join(lines).strip() or "No failing job details found."
with open('chack_failure_summary.txt', 'w') as handle: with open('codex_failure_summary.txt', 'w') as handle:
handle.write(summary) handle.write(summary)
PY PY
- name: Create Chack Agent prompt - name: Create Codex prompt
if: ${{ steps.pr_context.outputs.author == 'carlospolop' }}
env: env:
PR_NUMBER: ${{ needs.resolve_pr_context.outputs.number }} PR_NUMBER: ${{ steps.pr_context.outputs.number }}
RUN_URL: ${{ github.event.workflow_run.html_url }} RUN_URL: ${{ github.event.workflow_run.html_url }}
HEAD_BRANCH: ${{ needs.resolve_pr_context.outputs.head_branch }} HEAD_BRANCH: ${{ steps.pr_context.outputs.head_branch }}
run: | run: |
{ {
echo "You are fixing CI failures for PR #${PR_NUMBER} in ${{ github.repository }}." echo "You are fixing CI failures for PR #${PR_NUMBER} in ${{ github.repository }}."
@@ -147,77 +117,45 @@ jobs:
echo "The PR branch is: ${HEAD_BRANCH}" echo "The PR branch is: ${HEAD_BRANCH}"
echo "" echo ""
echo "Failure summary:" echo "Failure summary:"
cat chack_failure_summary.txt cat codex_failure_summary.txt
echo "" echo ""
echo "Please identify the cause, apply an easy, simple and minimal fix, and update files accordingly." echo "Please identify the cause, apply an easy, simple and minimal fix, and update files accordingly."
echo "Run any fast checks you can locally (no network)." echo "Run any fast checks you can locally (no network)."
echo "Leave the repo in a state ready to commit as when you finish, it'll be automatically committed and pushed." echo "Leave the repo in a state ready to commit as when you finish, it'll be automatically committed and pushed."
} > chack_prompt.txt } > codex_prompt.txt
- name: Set up Node.js for Codex - name: Run Codex
uses: actions/setup-node@v5 if: ${{ steps.pr_context.outputs.author == 'carlospolop' }}
id: run_codex
uses: openai/codex-action@v1
with: with:
node-version: "20" openai-api-key: ${{ secrets.OPENAI_API_KEY }}
prompt-file: codex_prompt.txt
- name: Install Codex CLI sandbox: workspace-write
run: | model: gpt-5.2-codex
npm install -g @openai/codex
codex --version
- name: Run Chack Agent
id: run_chack
uses: carlospolop/chack-agent@master
with:
provider: codex
model_primary: CHEAP_BUT_QUALITY
main_action: peass-ng
sub_action: PR Failure Chack-Agent Dispatch
system_prompt: |
You are Chack Agent, an elite CI-fix engineer.
Diagnose the failing workflow, propose the minimal safe fix, and implement it.
Run only fast, local checks (no network). Leave the repo ready to commit.
prompt_file: chack_prompt.txt
tools_config_json: "{\"exec_enabled\": true}"
session_config_json: "{\"long_term_memory_enabled\": false}"
agent_config_json: "{\"self_critique_enabled\": false, \"require_task_list_init_first\": true}"
openai_api_key: ${{ secrets.OPENAI_API_KEY }}
- name: Commit and push if changed - name: Commit and push if changed
if: ${{ steps.pr_context.outputs.author == 'carlospolop' }}
env: env:
TARGET_BRANCH: ${{ needs.resolve_pr_context.outputs.head_branch }} TARGET_BRANCH: ${{ steps.pr_context.outputs.head_branch }}
PR_NUMBER: ${{ needs.resolve_pr_context.outputs.number }} PR_NUMBER: ${{ steps.pr_context.outputs.number }}
run: | run: |
if git diff --quiet; then if git diff --quiet; then
echo "No changes to commit." echo "No changes to commit."
exit 0 exit 0
fi fi
rm -f chack_failure_summary.txt chack_prompt.txt rm -f codex_failure_summary.txt codex_prompt.txt
git add -A git add -A
# Avoid workflow-file pushes with token scopes that cannot write workflows. git reset -- codex_failure_summary.txt codex_prompt.txt
git reset -- .github/workflows || true
git checkout -- .github/workflows || true
git clean -fdx -- .github/workflows || true
git reset -- chack_failure_summary.txt chack_prompt.txt
if git diff --cached --name-only | grep -q '^.github/workflows/'; then
echo "Workflow-file changes are still staged; skipping push without workflows permission."
exit 0
fi
if git diff --cached --quiet; then
echo "No committable changes left after filtering."
exit 0
fi
git commit -m "Fix CI failures for PR #${PR_NUMBER}" git commit -m "Fix CI failures for PR #${PR_NUMBER}"
if ! git push origin HEAD:${TARGET_BRANCH}; then git push origin HEAD:${TARGET_BRANCH}
echo "Push failed (likely token workflow permission limits); leaving run successful without push."
exit 0
fi
- name: Comment with Chack Agent result - name: Comment with Codex result
if: ${{ steps.run_chack.outputs.final-message != '' }} if: ${{ steps.pr_context.outputs.author == 'carlospolop' && steps.run_codex.outputs.final-message != '' }}
uses: actions/github-script@v7 uses: actions/github-script@v7
env: env:
PR_NUMBER: ${{ needs.resolve_pr_context.outputs.number }} PR_NUMBER: ${{ steps.pr_context.outputs.number }}
CHACK_MESSAGE: ${{ steps.run_chack.outputs.final-message }} CODEX_MESSAGE: ${{ steps.run_codex.outputs.final-message }}
with: with:
github-token: ${{ github.token }} github-token: ${{ github.token }}
script: | script: |
@@ -225,5 +163,5 @@ jobs:
owner: context.repo.owner, owner: context.repo.owner,
repo: context.repo.repo, repo: context.repo.repo,
issue_number: Number(process.env.PR_NUMBER), issue_number: Number(process.env.PR_NUMBER),
body: process.env.CHACK_MESSAGE, body: process.env.CODEX_MESSAGE,
}); });

View File

@@ -0,0 +1,39 @@
# Title: System Information - Linux Exploit Suggester
# ID: SY_Linux_exploit_suggester
# Author: Carlos Polop
# Last Update: 07-03-2024
# Description: Execute Linux Exploit Suggester to identify potential kernel exploits:
# - Automated kernel vulnerability detection
# - Common vulnerable scenarios:
# * Known kernel vulnerabilities
# * Unpatched kernel versions
# * Missing security patches
# - Exploitation methods:
# * Kernel exploit execution: Use suggested exploits
# * Common attack vectors:
# - Kernel memory corruption
# - Race conditions
# - Use-after-free
# - Integer overflow
# * Exploit techniques:
# - Kernel memory manipulation
# - Privilege escalation
# - Root access acquisition
# - System compromise
# License: GNU GPL
# Version: 1.0
# Functions Used: print_2title, print_info
# Global Variables: $MACPEAS
# Initial Functions:
# Generated Global Variables: $les_b64
# Fat linpeas: 0
# Small linpeas: 1
# Run only when bash is available and not on macOS (the suggester targets Linux kernels).
if [ "$(command -v bash 2>/dev/null || echo -n '')" ] && ! [ "$MACPEAS" ]; then
  print_2title "Executing Linux Exploit Suggester"
  print_info "https://github.com/mzet-/linux-exploit-suggester"
  # peass{...} is replaced at build time with the base64-encoded script contents.
  les_b64="peass{https://raw.githubusercontent.com/mzet-/linux-exploit-suggester/master/linux-exploit-suggester.sh}"
  # Quote the expansion to avoid word splitting/globbing of the base64 blob (SC2086).
  echo "$les_b64" | base64 -d | bash | sed "s,$(printf '\033')\\[[0-9;]*[a-zA-Z],,g" | grep -i "\[CVE" -A 10 | grep -Ev "^\-\-$" | sed -${E} "s/\[(CVE-[0-9]+-[0-9]+,?)+\].*/${SED_RED}/g"
  echo ""
fi

View File

@@ -0,0 +1,41 @@
# Title: System Information - Linux Exploit Suggester 2
# ID: SY_Linux_exploit_suggester_2
# Author: Carlos Polop
# Last Update: 07-03-2024
# Description: Execute Linux Exploit Suggester 2 (Perl version) to identify potential kernel exploits:
# - Alternative kernel vulnerability detection
# - Perl-based exploit suggestions
# - Common vulnerable scenarios:
# * Known kernel vulnerabilities
# * Unpatched kernel versions
# * Missing security patches
# * Alternative exploit paths
# - Exploitation methods:
# * Kernel exploit execution: Use suggested exploits
# * Common attack vectors:
# - Kernel memory corruption
# - Race conditions
# - Use-after-free
# - Integer overflow
# * Exploit techniques:
# - Kernel memory manipulation
# - Privilege escalation
# - Root access acquisition
# - System compromise
# License: GNU GPL
# Version: 1.0
# Functions Used: print_2title, print_info
# Global Variables: $MACPEAS
# Initial Functions:
# Generated Global Variables: $les2_b64
# Fat linpeas: 1
# Small linpeas: 0
# Run only when perl is available and not on macOS (the suggester targets Linux kernels).
if [ "$(command -v perl 2>/dev/null || echo -n '')" ] && ! [ "$MACPEAS" ]; then
  print_2title "Executing Linux Exploit Suggester 2"
  print_info "https://github.com/jondonas/linux-exploit-suggester-2"
  # peass{...} is replaced at build time with the base64-encoded script contents.
  les2_b64="peass{https://raw.githubusercontent.com/jondonas/linux-exploit-suggester-2/master/linux-exploit-suggester-2.pl}"
  # Quote the expansion to avoid word splitting/globbing of the base64 blob (SC2086).
  echo "$les2_b64" | base64 -d | perl 2>/dev/null | sed "s,$(printf '\033')\\[[0-9;]*[a-zA-Z],,g" | grep -iE "CVE" -B 1 -A 10 | grep -Ev "^\-\-$" | sed -${E} "s,CVE-[0-9]+-[0-9]+,${SED_RED},g"
  echo ""
fi

View File

@@ -30,33 +30,11 @@
# Functions Used: echo_not_found, print_2title, print_list, warn_exec # Functions Used: echo_not_found, print_2title, print_list, warn_exec
# Global Variables: # Global Variables:
# Initial Functions: # Initial Functions:
# Generated Global Variables: $ASLR, $hypervisorflag, $detectedvirt, $unpriv_userns_clone, $perf_event_paranoid, $mmap_min_addr, $ptrace_scope, $dmesg_restrict, $kptr_restrict, $unpriv_bpf_disabled, $protected_symlinks, $protected_hardlinks, $label, $sysctl_path, $sysctl_var, $zero_color, $nonzero_color, $sysctl_value # Generated Global Variables: $ASLR, $hypervisorflag, $detectedvirt, $unpriv_userns_clone, $perf_event_paranoid, $mmap_min_addr, $ptrace_scope, $dmesg_restrict, $kptr_restrict, $unpriv_bpf_disabled, $protected_symlinks, $protected_hardlinks
# Fat linpeas: 0 # Fat linpeas: 0
# Small linpeas: 0 # Small linpeas: 0
# Print a sysctl-style value read from a /proc path and color it by its zero-ness.
#   $1 label         - text passed to print_list before the value
#   $2 sysctl_path   - file under /proc/sys to read
#   $3 sysctl_var    - NAME of a global variable the raw value is stored into (via eval)
#   $4 zero_color    - sed color replacement used when the value is exactly 0
#   $5 nonzero_color - sed color replacement used for any non-zero value
# If the file is missing/unreadable, echo_not_found is printed instead of a value.
# NOTE(review): original inline call sites passed the label and $NC concatenated as a
# single argument to print_list; here they are two arguments -- confirm print_list
# treats both forms identically.
print_sysctl_eq_zero() {
local label="$1"
local sysctl_path="$2"
local sysctl_var="$3"
local zero_color="$4"
local nonzero_color="$5"
local sysctl_value
print_list "$label" "$NC"
sysctl_value=$(cat "$sysctl_path" 2>/dev/null)
# Export the value into the caller-named global so later checks can reuse it.
eval "$sysctl_var=\$sysctl_value"
if [ -z "$sysctl_value" ]; then
echo_not_found "$sysctl_path"
else
# -eq forces an integer comparison; /proc sysctl values here are numeric.
if [ "$sysctl_value" -eq 0 ]; then
echo "0" | sed -${E} "s,0,${zero_color},"
else
echo "$sysctl_value" | sed -${E} "s,.*,${nonzero_color},g"
fi
fi
}
#-- SY) AppArmor #-- SY) AppArmor
print_2title "Protections" print_2title "Protections"
print_list "AppArmor enabled? .............. "$NC print_list "AppArmor enabled? .............. "$NC
@@ -103,25 +81,67 @@ print_list "User namespace? ................ "$NC
if [ "$(cat /proc/self/uid_map 2>/dev/null)" ]; then echo "enabled" | sed "s,enabled,${SED_GREEN},"; else echo "disabled" | sed "s,disabled,${SED_RED},"; fi if [ "$(cat /proc/self/uid_map 2>/dev/null)" ]; then echo "enabled" | sed "s,enabled,${SED_GREEN},"; else echo "disabled" | sed "s,disabled,${SED_RED},"; fi
#-- SY) Unprivileged user namespaces #-- SY) Unprivileged user namespaces
print_sysctl_eq_zero "unpriv_userns_clone? ........... " "/proc/sys/kernel/unprivileged_userns_clone" "unpriv_userns_clone" "$SED_GREEN" "$SED_RED" print_list "unpriv_userns_clone? ........... "$NC
unpriv_userns_clone=$(cat /proc/sys/kernel/unprivileged_userns_clone 2>/dev/null)
if [ -z "$unpriv_userns_clone" ]; then
echo_not_found "/proc/sys/kernel/unprivileged_userns_clone"
else
if [ "$unpriv_userns_clone" -eq 0 ]; then echo "0" | sed -${E} "s,0,${SED_GREEN},"; else echo "$unpriv_userns_clone" | sed -${E} "s,.*,${SED_RED},g"; fi
fi
#-- SY) Unprivileged eBPF #-- SY) Unprivileged eBPF
print_sysctl_eq_zero "unpriv_bpf_disabled? ........... " "/proc/sys/kernel/unprivileged_bpf_disabled" "unpriv_bpf_disabled" "$SED_RED" "$SED_GREEN" print_list "unpriv_bpf_disabled? ........... "$NC
unpriv_bpf_disabled=$(cat /proc/sys/kernel/unprivileged_bpf_disabled 2>/dev/null)
if [ -z "$unpriv_bpf_disabled" ]; then
echo_not_found "/proc/sys/kernel/unprivileged_bpf_disabled"
else
if [ "$unpriv_bpf_disabled" -eq 0 ]; then echo "0" | sed -${E} "s,0,${SED_RED},"; else echo "$unpriv_bpf_disabled" | sed -${E} "s,.*,${SED_GREEN},g"; fi
fi
#-- SY) cgroup2 #-- SY) cgroup2
print_list "Cgroup2 enabled? ............... "$NC print_list "Cgroup2 enabled? ............... "$NC
([ "$(grep cgroup2 /proc/filesystems 2>/dev/null)" ] && echo "enabled" || echo "disabled") | sed "s,disabled,${SED_RED}," | sed "s,enabled,${SED_GREEN}," ([ "$(grep cgroup2 /proc/filesystems 2>/dev/null)" ] && echo "enabled" || echo "disabled") | sed "s,disabled,${SED_RED}," | sed "s,enabled,${SED_GREEN},"
#-- SY) Kernel hardening sysctls #-- SY) Kernel hardening sysctls
print_sysctl_eq_zero "kptr_restrict? ................. " "/proc/sys/kernel/kptr_restrict" "kptr_restrict" "$SED_RED" "$SED_GREEN" print_list "kptr_restrict? ................. "$NC
kptr_restrict=$(cat /proc/sys/kernel/kptr_restrict 2>/dev/null)
if [ -z "$kptr_restrict" ]; then
echo_not_found "/proc/sys/kernel/kptr_restrict"
else
if [ "$kptr_restrict" -eq 0 ]; then echo "0" | sed -${E} "s,0,${SED_RED},"; else echo "$kptr_restrict" | sed -${E} "s,.*,${SED_GREEN},g"; fi
fi
print_sysctl_eq_zero "dmesg_restrict? ................ " "/proc/sys/kernel/dmesg_restrict" "dmesg_restrict" "$SED_RED" "$SED_GREEN" print_list "dmesg_restrict? ................ "$NC
dmesg_restrict=$(cat /proc/sys/kernel/dmesg_restrict 2>/dev/null)
if [ -z "$dmesg_restrict" ]; then
echo_not_found "/proc/sys/kernel/dmesg_restrict"
else
if [ "$dmesg_restrict" -eq 0 ]; then echo "0" | sed -${E} "s,0,${SED_RED},"; else echo "$dmesg_restrict" | sed -${E} "s,.*,${SED_GREEN},g"; fi
fi
print_sysctl_eq_zero "ptrace_scope? .................. " "/proc/sys/kernel/yama/ptrace_scope" "ptrace_scope" "$SED_RED" "$SED_GREEN" print_list "ptrace_scope? .................. "$NC
ptrace_scope=$(cat /proc/sys/kernel/yama/ptrace_scope 2>/dev/null)
if [ -z "$ptrace_scope" ]; then
echo_not_found "/proc/sys/kernel/yama/ptrace_scope"
else
if [ "$ptrace_scope" -eq 0 ]; then echo "0" | sed -${E} "s,0,${SED_RED},"; else echo "$ptrace_scope" | sed -${E} "s,.*,${SED_GREEN},g"; fi
fi
print_sysctl_eq_zero "protected_symlinks? ............ " "/proc/sys/fs/protected_symlinks" "protected_symlinks" "$SED_RED" "$SED_GREEN" print_list "protected_symlinks? ............ "$NC
protected_symlinks=$(cat /proc/sys/fs/protected_symlinks 2>/dev/null)
if [ -z "$protected_symlinks" ]; then
echo_not_found "/proc/sys/fs/protected_symlinks"
else
if [ "$protected_symlinks" -eq 0 ]; then echo "0" | sed -${E} "s,0,${SED_RED},"; else echo "$protected_symlinks" | sed -${E} "s,.*,${SED_GREEN},g"; fi
fi
print_sysctl_eq_zero "protected_hardlinks? ........... " "/proc/sys/fs/protected_hardlinks" "protected_hardlinks" "$SED_RED" "$SED_GREEN" print_list "protected_hardlinks? ........... "$NC
protected_hardlinks=$(cat /proc/sys/fs/protected_hardlinks 2>/dev/null)
if [ -z "$protected_hardlinks" ]; then
echo_not_found "/proc/sys/fs/protected_hardlinks"
else
if [ "$protected_hardlinks" -eq 0 ]; then echo "0" | sed -${E} "s,0,${SED_RED},"; else echo "$protected_hardlinks" | sed -${E} "s,.*,${SED_GREEN},g"; fi
fi
print_list "perf_event_paranoid? ........... "$NC print_list "perf_event_paranoid? ........... "$NC
perf_event_paranoid=$(cat /proc/sys/kernel/perf_event_paranoid 2>/dev/null) perf_event_paranoid=$(cat /proc/sys/kernel/perf_event_paranoid 2>/dev/null)
@@ -131,7 +151,13 @@ else
if [ "$perf_event_paranoid" -le 1 ]; then echo "$perf_event_paranoid" | sed -${E} "s,.*,${SED_RED},g"; else echo "$perf_event_paranoid" | sed -${E} "s,.*,${SED_GREEN},g"; fi if [ "$perf_event_paranoid" -le 1 ]; then echo "$perf_event_paranoid" | sed -${E} "s,.*,${SED_RED},g"; else echo "$perf_event_paranoid" | sed -${E} "s,.*,${SED_GREEN},g"; fi
fi fi
print_sysctl_eq_zero "mmap_min_addr? ................. " "/proc/sys/vm/mmap_min_addr" "mmap_min_addr" "$SED_RED" "$SED_GREEN" print_list "mmap_min_addr? ................. "$NC
mmap_min_addr=$(cat /proc/sys/vm/mmap_min_addr 2>/dev/null)
if [ -z "$mmap_min_addr" ]; then
echo_not_found "/proc/sys/vm/mmap_min_addr"
else
if [ "$mmap_min_addr" -eq 0 ]; then echo "0" | sed -${E} "s,0,${SED_RED},"; else echo "$mmap_min_addr" | sed -${E} "s,.*,${SED_GREEN},g"; fi
fi
print_list "lockdown mode? ................. "$NC print_list "lockdown mode? ................. "$NC
if [ -f "/sys/kernel/security/lockdown" ]; then if [ -f "/sys/kernel/security/lockdown" ]; then

View File

@@ -0,0 +1,20 @@
# Title: Container - Am I Containered
# ID: CT_Am_I_contained
# Author: Carlos Polop
# Last Update: 22-08-2023
# Description: Am I Containered tool
# License: GNU GPL
# Version: 1.0
# Functions Used: print_2title, execBin
# Global Variables:
# Initial Functions:
# Generated Global Variables: $FAT_LINPEAS_AMICONTAINED
# Fat linpeas: 1
# Small linpeas: 0
if [ "$$FAT_LINPEAS_AMICONTAINED" ]; then
print_2title "Am I Containered?"
FAT_LINPEAS_AMICONTAINED="peass{https://github.com/genuinetools/amicontained/releases/latest/download/amicontained-linux-amd64}"
execBin "AmIContainered" "https://github.com/genuinetools/amicontained" "$FAT_LINPEAS_AMICONTAINED"
fi

View File

@@ -17,7 +17,7 @@
# Functions Used: print_2title, print_list, echo_not_found # Functions Used: print_2title, print_list, echo_not_found
# Global Variables: $SEARCH_IN_FOLDER, $Wfolders, $SED_RED, $SED_RED_YELLOW, $NC # Global Variables: $SEARCH_IN_FOLDER, $Wfolders, $SED_RED, $SED_RED_YELLOW, $NC
# Initial Functions: # Initial Functions:
# Generated Global Variables: $WRITABLESYSTEMDPATH, $line, $service, $file, $version, $user, $caps, $path, $path_line, $service_file, $exec_line, $exec_value, $cmd, $cmd_path # Generated Global Variables: $WRITABLESYSTEMDPATH, $line, $service, $file, $version, $user, $caps, $path, $path_line, $service_file, $exec_line, $cmd
# Fat linpeas: 0 # Fat linpeas: 0
# Small linpeas: 1 # Small linpeas: 1
@@ -116,20 +116,18 @@ if ! [ "$SEARCH_IN_FOLDER" ]; then
# Check ExecStart paths # Check ExecStart paths
grep -E "ExecStart|ExecStartPre|ExecStartPost" "$service_file" 2>/dev/null | grep -E "ExecStart|ExecStartPre|ExecStartPost" "$service_file" 2>/dev/null |
while read -r exec_line; do while read -r exec_line; do
# Extract command from the right side of Exec*=, not from argv # Extract the first word after ExecStart* as the command
exec_value="${exec_line#*=}" cmd=$(echo "$exec_line" | awk '{print $2}' | tr -d '"')
exec_value=$(echo "$exec_value" | sed 's/^[[:space:]]*//') # Extract the rest as arguments
cmd=$(echo "$exec_value" | awk '{print $1}' | tr -d '"') args=$(echo "$exec_line" | awk '{$1=$2=""; print $0}' | tr -d '"')
# Strip systemd command prefixes (-, @, :, +, !) before path checks
cmd_path=$(echo "$cmd" | sed -E 's/^[-@:+!]+//')
# Only check the command path, not arguments # Only check the command path, not arguments
if [ -n "$cmd_path" ] && [ -w "$cmd_path" ]; then if [ -n "$cmd" ] && [ -w "$cmd" ]; then
echo "$service: $cmd_path (from $exec_line)" | sed -${E} "s,.*,${SED_RED},g" echo "$service: $cmd (from $exec_line)" | sed -${E} "s,.*,${SED_RED},g"
fi fi
# Check for relative paths only in the command, not arguments # Check for relative paths only in the command, not arguments
if [ -n "$cmd_path" ] && [ "${cmd_path#/}" = "$cmd_path" ] && [ "${cmd_path#\$}" = "$cmd_path" ]; then if [ -n "$cmd" ] && [ "${cmd#/}" = "$cmd" ] && ! echo "$cmd" | grep -qE '^-|^--'; then
echo "$service: Uses relative path '$cmd_path' (from $exec_line)" | sed -${E} "s,.*,${SED_RED},g" echo "$service: Uses relative path '$cmd' (from $exec_line)" | sed -${E} "s,.*,${SED_RED},g"
fi fi
done done
fi fi
@@ -155,4 +153,4 @@ if ! [ "$SEARCH_IN_FOLDER" ]; then
fi fi
echo "" echo ""
fi fi

View File

@@ -1,36 +0,0 @@
# Title: Users Information - subuid/subgid mappings
# ID: UG_Subuid_subgid_mappings
# Author: Carlos Polop
# Last Update: 13-02-2026
# Description: Show delegated user namespace ID ranges from /etc/subuid and /etc/subgid.
# License: GNU GPL
# Version: 1.0
# Functions Used: print_2title
# Global Variables: $MACPEAS
# Initial Functions:
# Generated Global Variables:
# Fat linpeas: 0
# Small linpeas: 1
print_2title "User namespace mappings (subuid/subgid)"
if [ "$MACPEAS" ]; then
  echo "Not applicable on macOS"
else
  if [ -r /etc/subuid ]; then
    echo "subuid:"
    # Strip comment and blank lines. [[:space:]] instead of \s: \s is a GNU grep
    # extension and is not supported by busybox/BSD grep, which linpeas may run under.
    grep -v -E '^[[:space:]]*#|^[[:space:]]*$' /etc/subuid 2>/dev/null
  else
    echo "/etc/subuid not readable or not present"
  fi
  if [ -r /etc/subgid ]; then
    echo ""
    echo "subgid:"
    grep -v -E '^[[:space:]]*#|^[[:space:]]*$' /etc/subgid 2>/dev/null
  else
    echo "/etc/subgid not readable or not present"
  fi
fi
echo ""

View File

@@ -0,0 +1,30 @@
# Title: Software Information - Checking leaks in git repositories
# ID: SI_Leaks_git_repo
# Author: Carlos Polop
# Last Update: 22-08-2023
# Description: Checking leaks in git repositories
# License: GNU GPL
# Version: 1.0
# Functions Used: execBin, print_2title
# Global Variables: $MACPEAS, $TIMEOUT
# Initial Functions:
# Generated Global Variables: $git_dirname, $FAT_LINPEAS_GITLEAKS
# Fat linpeas: 1
# Small linpeas: 0
if ! [ "$FAST" ] && ! [ "$SUPERFAST" ] && [ "$TIMEOUT" ]; then
print_2title "Checking leaks in git repositories"
printf "%s\n" "$PSTORAGE_GITHUB" | while read f; do
if echo "$f" | grep -Eq ".git$"; then
git_dirname=$(dirname "$f")
if [ "$MACPEAS" ]; then
FAT_LINPEAS_GITLEAKS="peass{https://github.com/gitleaks/gitleaks/releases/download/v8.17.0/gitleaks_8.17.0_darwin_arm64.tar.gz}"
else
FAT_LINPEAS_GITLEAKS="peass{https://github.com/gitleaks/gitleaks/releases/download/v8.17.0/gitleaks_8.17.0_linux_x64.tar.gz}"
fi
execBin "GitLeaks (checking $git_dirname)" "https://github.com/zricethezav/gitleaks" "$FAT_LINPEAS_GITLEAKS" "detect -s '$git_dirname' -v | grep -E 'Description|Match|Secret|Message|Date'"
fi
done
echo ""
fi

View File

@@ -46,7 +46,7 @@ class LinpeasBuilder:
def build(self): def build(self):
print("[+] Building variables...") print("[+] Building variables...")
variables = self.__generate_variabless() variables = self.__generate_variables()
self.__replace_mark(PEAS_VARIABLES_MARKUP, variables, "") self.__replace_mark(PEAS_VARIABLES_MARKUP, variables, "")
if len(re.findall(r"PSTORAGE_[a-zA-Z0-9_]+", self.linpeas_sh)) > 1: #Only add storages if there are storages (PSTORAGE_BACKUPS is always there so it doesn't count) if len(re.findall(r"PSTORAGE_[a-zA-Z0-9_]+", self.linpeas_sh)) > 1: #Only add storages if there are storages (PSTORAGE_BACKUPS is always there so it doesn't count)

View File

@@ -8,7 +8,6 @@ from .yamlGlobals import (
class LinpeasModule: class LinpeasModule:
def __init__(self, path): def __init__(self, path):
self.path = path self.path = path
real_path = os.path.realpath(path)
with open(path, 'r') as file: with open(path, 'r') as file:
self.module_text = file.read() self.module_text = file.read()
@@ -30,7 +29,7 @@ class LinpeasModule:
self.section_info = {} self.section_info = {}
if not (self.is_base or self.is_function or self.is_variable): if not (self.is_base or self.is_function or self.is_variable):
for module in LINPEAS_PARTS["modules"]: for module in LINPEAS_PARTS["modules"]:
if os.path.realpath(module["folder_path"]) in real_path: if module["folder_path"] in path:
self.section_info = module self.section_info = module
self.is_check = True self.is_check = True
break break

View File

@@ -1,40 +0,0 @@
import os
import stat
import subprocess
import tempfile
import unittest
from pathlib import Path
class LinpeasBuilderTests(unittest.TestCase):
    """End-to-end checks that the linPEAS builder produces usable scripts."""

    def setUp(self):
        # This test file lives two directories below the repository root.
        self.repo_root = Path(__file__).resolve().parents[2]
        self.linpeas_dir = self.repo_root / "linPEAS"

    def _run_builder(self, args, output_path):
        # Invoke the builder as a module subprocess from the linPEAS directory.
        command_line = ["python3", "-m", "builder.linpeas_builder", *args, "--output", str(output_path)]
        completed = subprocess.run(command_line, cwd=str(self.linpeas_dir), capture_output=True, text=True)
        if completed.returncode == 0:
            return
        raise AssertionError(
            f"linpeas_builder failed:\nstdout:\n{completed.stdout}\nstderr:\n{completed.stderr}"
        )

    def test_small_build_creates_executable(self):
        # A --small build must produce an executable script file.
        with tempfile.TemporaryDirectory() as workdir:
            target = Path(workdir) / "linpeas_small.sh"
            self._run_builder(["--small"], target)
            self.assertTrue(target.exists(), "linpeas_small.sh was not created.")
            file_mode = target.stat().st_mode
            self.assertTrue(file_mode & stat.S_IXUSR, "linpeas_small.sh is not executable.")

    def test_include_exclude_modules(self):
        # --exclude must win over --include for the same section.
        with tempfile.TemporaryDirectory() as workdir:
            target = Path(workdir) / "linpeas_include.sh"
            builder_args = ["--include", "system_information,container", "--exclude", "container"]
            self._run_builder(builder_args, target)
            built_script = target.read_text(encoding="utf-8", errors="ignore")
            self.assertIn("Operative system", built_script)
            self.assertNotIn("Am I Containered?", built_script)


if __name__ == "__main__":
    unittest.main()

View File

@@ -1,60 +0,0 @@
import re
import sys
import unittest
from pathlib import Path
class LinpeasModulesMetadataTests(unittest.TestCase):
    """Validate the metadata headers of every linPEAS shell module."""

    @classmethod
    def setUpClass(cls):
        cls.repo_root = Path(__file__).resolve().parents[2]
        cls.linpeas_dir = cls.repo_root / "linPEAS"
        cls.parts_dir = cls.linpeas_dir / "builder" / "linpeas_parts"
        # Ensure `import builder.*` works when tests are run from repo root.
        sys.path.insert(0, str(cls.linpeas_dir))
        from builder.src.linpeasModule import LinpeasModule  # pylint: disable=import-error
        cls.LinpeasModule = LinpeasModule

    def _iter_module_files(self):
        # Every *.sh below linpeas_parts is a module; sort for deterministic order.
        return sorted(self.parts_dir.rglob("*.sh"))

    def test_all_modules_parse(self):
        shell_modules = self._iter_module_files()
        self.assertGreater(len(shell_modules), 0, "No linPEAS module files were found.")
        # Constructing a module validates its metadata and dependencies.
        for module_path in shell_modules:
            self.LinpeasModule(str(module_path))

    def test_check_module_id_matches_filename(self):
        for module_path in self._iter_module_files():
            parsed = self.LinpeasModule(str(module_path))
            if not getattr(parsed, "is_check", False):
                continue
            # For checks, the filename (without numeric prefix) must match the module ID
            # (either the full ID or the ID with its section prefix like `SI_` stripped).
            file_base = re.sub(r"^[0-9]+_", "", module_path.stem)
            module_id = getattr(parsed, "id", "")
            id_without_section = module_id[3:] if len(module_id) >= 3 else ""
            self.assertIn(
                file_base,
                {module_id, id_without_section},
                f"Module ID mismatch in {module_path}: id={module_id} expected suffix={file_base}",
            )

    def test_module_ids_are_unique(self):
        # Count occurrences of each ID, then keep only the non-empty repeated ones.
        id_counts = {}
        for module_path in self._iter_module_files():
            parsed = self.LinpeasModule(str(module_path))
            module_id = getattr(parsed, "id", "")
            id_counts[module_id] = id_counts.get(module_id, 0) + 1
        duplicates = {mid for mid, count in id_counts.items() if mid and count > 1}
        self.assertEqual(set(), duplicates, f"Duplicate module IDs found: {sorted(duplicates)}")


if __name__ == "__main__":
    unittest.main()

View File

@@ -1,36 +0,0 @@
using System;
using System.Reflection;
using Microsoft.VisualStudio.TestTools.UnitTesting;
namespace winPEAS.Tests
{
    /// <summary>
    /// Exercises the private network-argument validator in Checks via reflection.
    /// </summary>
    [TestClass]
    public class ArgumentParsingTests
    {
        private static bool InvokeIsNetworkTypeValid(string arg)
        {
            // The validator is a private static method; reach it through reflection.
            MethodInfo validator = typeof(winPEAS.Checks.Checks).GetMethod(
                "IsNetworkTypeValid",
                BindingFlags.NonPublic | BindingFlags.Static);
            Assert.IsNotNull(validator, "IsNetworkTypeValid method not found.");
            object[] invokeArgs = { arg };
            return (bool)validator.Invoke(null, invokeArgs);
        }

        [TestMethod]
        public void ShouldAcceptValidNetworkTypes()
        {
            // auto, single IP, CIDR range and comma-separated list are all valid.
            string[] accepted =
            {
                "-network=auto",
                "-network=10.10.10.10",
                "-network=10.10.10.10/24",
                "-network=10.10.10.10,10.10.10.20",
            };
            foreach (string candidate in accepted)
            {
                Assert.IsTrue(InvokeIsNetworkTypeValid(candidate));
            }
        }

        [TestMethod]
        public void ShouldRejectInvalidNetworkTypes()
        {
            // Empty values, out-of-range octets/prefixes and non-IP text must be rejected.
            string[] rejected =
            {
                "-network=",
                "-network=10.10.10.999",
                "-network=10.10.10.10/64",
                "-network=999.999.999.999/24",
                "-network=not-an-ip",
            };
            foreach (string candidate in rejected)
            {
                Assert.IsFalse(InvokeIsNetworkTypeValid(candidate));
            }
        }
    }
}

View File

@@ -1,37 +0,0 @@
using System;
using Microsoft.VisualStudio.TestTools.UnitTesting;
namespace winPEAS.Tests
{
    /// <summary>
    /// Smoke tests: malformed command-line arguments must make winPEAS return
    /// early with a user-facing message instead of throwing.
    /// </summary>
    [TestClass]
    public class ChecksArgumentEdgeCasesTests
    {
        // Forward the arguments straight to the program entry point.
        private static void RunWinPeas(params string[] args) => Program.Main(args);

        [TestMethod]
        public void ShouldNotThrowOnEmptyLogFileArg()
        {
            // Should return early with a user-friendly error, not crash.
            RunWinPeas("log=");
        }

        [TestMethod]
        public void ShouldNotThrowOnPortsWithoutNetwork()
        {
            // Should warn and return early because -network was not provided.
            RunWinPeas("-ports=80,443");
        }

        [TestMethod]
        public void ShouldNotThrowOnInvalidNetworkArgument()
        {
            // Should warn and return early because the IP is invalid.
            RunWinPeas("-network=10.10.10.999");
        }

        [TestMethod]
        public void ShouldNotThrowOnEmptyNetworkArgument()
        {
            // Should warn and return early because the value is empty.
            RunWinPeas("-network=");
        }
    }
}

View File

@@ -61,11 +61,9 @@
</Reference> </Reference>
<Reference Include="Microsoft.VisualStudio.TestPlatform.TestFramework, Version=14.0.0.0, Culture=neutral, PublicKeyToken=b03f5f7f11d50a3a, processorArchitecture=MSIL"> <Reference Include="Microsoft.VisualStudio.TestPlatform.TestFramework, Version=14.0.0.0, Culture=neutral, PublicKeyToken=b03f5f7f11d50a3a, processorArchitecture=MSIL">
<HintPath>..\packages\MSTest.TestFramework.2.2.5\lib\net45\Microsoft.VisualStudio.TestPlatform.TestFramework.dll</HintPath> <HintPath>..\packages\MSTest.TestFramework.2.2.5\lib\net45\Microsoft.VisualStudio.TestPlatform.TestFramework.dll</HintPath>
<Private>True</Private>
</Reference> </Reference>
<Reference Include="Microsoft.VisualStudio.TestPlatform.TestFramework.Extensions, Version=14.0.0.0, Culture=neutral, PublicKeyToken=b03f5f7f11d50a3a, processorArchitecture=MSIL"> <Reference Include="Microsoft.VisualStudio.TestPlatform.TestFramework.Extensions, Version=14.0.0.0, Culture=neutral, PublicKeyToken=b03f5f7f11d50a3a, processorArchitecture=MSIL">
<HintPath>..\packages\MSTest.TestFramework.2.2.5\lib\net45\Microsoft.VisualStudio.TestPlatform.TestFramework.Extensions.dll</HintPath> <HintPath>..\packages\MSTest.TestFramework.2.2.5\lib\net45\Microsoft.VisualStudio.TestPlatform.TestFramework.Extensions.dll</HintPath>
<Private>True</Private>
</Reference> </Reference>
<Reference Include="System" /> <Reference Include="System" />
<Reference Include="System.ComponentModel.Composition" /> <Reference Include="System.ComponentModel.Composition" />
@@ -97,7 +95,6 @@
<Reference Include="System.Xml" /> <Reference Include="System.Xml" />
</ItemGroup> </ItemGroup>
<ItemGroup> <ItemGroup>
<Compile Include="ArgumentParsingTests.cs" />
<Compile Include="Properties\AssemblyInfo.cs" /> <Compile Include="Properties\AssemblyInfo.cs" />
<Compile Include="SmokeTests.cs" /> <Compile Include="SmokeTests.cs" />
</ItemGroup> </ItemGroup>
@@ -111,40 +108,6 @@
<Name>winPEAS</Name> <Name>winPEAS</Name>
</ProjectReference> </ProjectReference>
</ItemGroup> </ItemGroup>
<Target Name="CopyVSTestFrameworkToMSTestAdapter" AfterTargets="Build">
<PropertyGroup>
<_PackagesDir>$(MSBuildThisFileDirectory)..\packages\</_PackagesDir>
<_MSTestFrameworkDir>$(_PackagesDir)MSTest.TestFramework.2.2.5\lib\net45\</_MSTestFrameworkDir>
</PropertyGroup>
<ItemGroup Condition="Exists('$(_MSTestFrameworkDir)')">
<_VSTestFrameworkDlls Include="$(_MSTestFrameworkDir)Microsoft.VisualStudio.TestPlatform.TestFramework*.dll" />
</ItemGroup>
<ItemGroup>
<_VSTestCopyDirs Include="$(TargetDir)" Condition="'$(TargetDir)' != '' AND Exists('$(TargetDir)')" />
<_MSTestAdapterDirs Include="$(_PackagesDir)MSTest.TestAdapter.2.2.5\build\net45\" Condition="Exists('$(_PackagesDir)MSTest.TestAdapter.2.2.5\build\net45\')" />
<_MSTestAdapterDirs Include="$(_PackagesDir)MSTest.TestAdapter.2.2.5\build\_common\" Condition="Exists('$(_PackagesDir)MSTest.TestAdapter.2.2.5\build\_common\')" />
</ItemGroup>
<Message
Condition="@(_VSTestFrameworkDlls) != ''"
Importance="high"
Text="CopyVSTestFrameworkToMSTestAdapter: copying @( _VSTestFrameworkDlls )" />
<Copy
Condition="@(_VSTestFrameworkDlls) != '' AND @(_VSTestCopyDirs) != ''"
SourceFiles="@(_VSTestFrameworkDlls)"
DestinationFolder="%(_VSTestCopyDirs.Identity)"
SkipUnchangedFiles="true" />
<Copy
Condition="@(_VSTestFrameworkDlls) != '' AND @(_MSTestAdapterDirs) != ''"
SourceFiles="@(_VSTestFrameworkDlls)"
DestinationFolder="%(_MSTestAdapterDirs.Identity)"
SkipUnchangedFiles="true" />
</Target>
<Import Project="$(MSBuildToolsPath)\Microsoft.CSharp.targets" /> <Import Project="$(MSBuildToolsPath)\Microsoft.CSharp.targets" />
<Target Name="EnsureNuGetPackageBuildImports" BeforeTargets="PrepareForBuild"> <Target Name="EnsureNuGetPackageBuildImports" BeforeTargets="PrepareForBuild">
<PropertyGroup> <PropertyGroup>
@@ -170,4 +133,4 @@
<Import Project="..\packages\Stub.System.Data.SQLite.Core.NetFramework.1.0.119.0\build\net451\Stub.System.Data.SQLite.Core.NetFramework.targets" Condition="Exists('..\packages\Stub.System.Data.SQLite.Core.NetFramework.1.0.119.0\build\net451\Stub.System.Data.SQLite.Core.NetFramework.targets')" /> <Import Project="..\packages\Stub.System.Data.SQLite.Core.NetFramework.1.0.119.0\build\net451\Stub.System.Data.SQLite.Core.NetFramework.targets" Condition="Exists('..\packages\Stub.System.Data.SQLite.Core.NetFramework.1.0.119.0\build\net451\Stub.System.Data.SQLite.Core.NetFramework.targets')" />
<Import Project="..\packages\Fody.6.5.5\build\Fody.targets" Condition="Exists('..\packages\Fody.6.5.5\build\Fody.targets')" /> <Import Project="..\packages\Fody.6.5.5\build\Fody.targets" Condition="Exists('..\packages\Fody.6.5.5\build\Fody.targets')" />
<Import Project="..\packages\Costura.Fody.5.7.0\build\Costura.Fody.targets" Condition="Exists('..\packages\Costura.Fody.5.7.0\build\Costura.Fody.targets')" /> <Import Project="..\packages\Costura.Fody.5.7.0\build\Costura.Fody.targets" Condition="Exists('..\packages\Costura.Fody.5.7.0\build\Costura.Fody.targets')" />
</Project> </Project>

View File

@@ -356,7 +356,7 @@ namespace winPEAS.Checks
{ {
var rangeParts = networkType.Split('/'); var rangeParts = networkType.Split('/');
if (rangeParts.Length == 2 && IPAddress.TryParse(rangeParts[0], out _) && int.TryParse(rangeParts[1], out int res) && res <= 32 && res >= 0) if (rangeParts.Length == 2 && int.TryParse(rangeParts[1], out int res) && res <= 32 && res >= 0)
{ {
return true; return true;
} }

View File

@@ -524,7 +524,7 @@ namespace winPEAS.Checks
{ {
Beaprint.MainPrint("Looking for documents --limit 100--"); Beaprint.MainPrint("Looking for documents --limit 100--");
List<string> docFiles = InterestingFiles.InterestingFiles.ListUsersDocs(); List<string> docFiles = InterestingFiles.InterestingFiles.ListUsersDocs();
Beaprint.ListPrint(MyUtils.GetLimitedRange(docFiles, 100)); Beaprint.ListPrint(docFiles.GetRange(0, docFiles.Count <= 100 ? docFiles.Count : 100));
} }
catch (Exception ex) catch (Exception ex)
{ {
@@ -546,7 +546,7 @@ namespace winPEAS.Checks
if (recFiles.Count != 0) if (recFiles.Count != 0)
{ {
foreach (Dictionary<string, string> recF in MyUtils.GetLimitedRange(recFiles, 70)) foreach (Dictionary<string, string> recF in recFiles.GetRange(0, recFiles.Count <= 70 ? recFiles.Count : 70))
{ {
Beaprint.AnsiPrint(" " + recF["Target"] + "(" + recF["Accessed"] + ")", colorF); Beaprint.AnsiPrint(" " + recF["Target"] + "(" + recF["Accessed"] + ")", colorF);
} }

View File

@@ -348,7 +348,8 @@ namespace winPEAS.Checks
Beaprint.MainPrint("DNS cached --limit 70--"); Beaprint.MainPrint("DNS cached --limit 70--");
Beaprint.GrayPrint(string.Format(" {0,-38}{1,-38}{2}", "Entry", "Name", "Data")); Beaprint.GrayPrint(string.Format(" {0,-38}{1,-38}{2}", "Entry", "Name", "Data"));
List<Dictionary<string, string>> DNScache = NetworkInfoHelper.GetDNSCache(); List<Dictionary<string, string>> DNScache = NetworkInfoHelper.GetDNSCache();
foreach (Dictionary<string, string> entry in MyUtils.GetLimitedRange(DNScache, 70)) foreach (Dictionary<string, string> entry in DNScache.GetRange(0,
DNScache.Count <= 70 ? DNScache.Count : 70))
{ {
Console.WriteLine($" {entry["Entry"],-38}{entry["Name"],-38}{entry["Data"]}"); Console.WriteLine($" {entry["Entry"],-38}{entry["Name"],-38}{entry["Data"]}");
} }

View File

@@ -21,11 +21,6 @@ namespace winPEAS.Helpers
""); //To get the default object you need to use an empty string ""); //To get the default object you need to use an empty string
} }
public static List<T> GetLimitedRange<T>(List<T> items, int limit)
{
return items.GetRange(0, Math.Min(items.Count, limit));
}
//////////////////////////////////// ////////////////////////////////////
/////// MISC - Files & Paths /////// /////// MISC - Files & Paths ///////
//////////////////////////////////// ////////////////////////////////////

View File

@@ -11,7 +11,6 @@ namespace winPEAS
[STAThread] [STAThread]
public static void Main(string[] args) public static void Main(string[] args)
{ {
// TODO: keep Main minimal; this line was an intentional break in test PR.
Checks.Checks.Run(args); Checks.Checks.Run(args);
} }
} }

View File

@@ -57,7 +57,7 @@
<Prefer32Bit>false</Prefer32Bit> <Prefer32Bit>false</Prefer32Bit>
<LangVersion>8.0</LangVersion> <LangVersion>8.0</LangVersion>
<RunCodeAnalysis>false</RunCodeAnalysis> <RunCodeAnalysis>false</RunCodeAnalysis>
<CodeAnalysisRuleSet Condition="Exists('MinimumRecommendedRules.ruleset')">MinimumRecommendedRules.ruleset</CodeAnalysisRuleSet> <CodeAnalysisRuleSet>MinimumRecommendedRules.ruleset</CodeAnalysisRuleSet>
<AllowUnsafeBlocks>true</AllowUnsafeBlocks> <AllowUnsafeBlocks>true</AllowUnsafeBlocks>
</PropertyGroup> </PropertyGroup>
<PropertyGroup> <PropertyGroup>
@@ -71,7 +71,7 @@
<PlatformTarget>AnyCPU</PlatformTarget> <PlatformTarget>AnyCPU</PlatformTarget>
<LangVersion>8.0</LangVersion> <LangVersion>8.0</LangVersion>
<ErrorReport>prompt</ErrorReport> <ErrorReport>prompt</ErrorReport>
<CodeAnalysisRuleSet Condition="Exists('MinimumRecommendedRules.ruleset')">MinimumRecommendedRules.ruleset</CodeAnalysisRuleSet> <CodeAnalysisRuleSet>MinimumRecommendedRules.ruleset</CodeAnalysisRuleSet>
<Prefer32Bit>false</Prefer32Bit> <Prefer32Bit>false</Prefer32Bit>
<AllowUnsafeBlocks>true</AllowUnsafeBlocks> <AllowUnsafeBlocks>true</AllowUnsafeBlocks>
<NoWarn>0168 ; 0169; 0414; 0618; 0649</NoWarn> <NoWarn>0168 ; 0169; 0414; 0618; 0649</NoWarn>
@@ -84,7 +84,7 @@
<PlatformTarget>x64</PlatformTarget> <PlatformTarget>x64</PlatformTarget>
<LangVersion>8.0</LangVersion> <LangVersion>8.0</LangVersion>
<ErrorReport>prompt</ErrorReport> <ErrorReport>prompt</ErrorReport>
<CodeAnalysisRuleSet Condition="Exists('MinimumRecommendedRules.ruleset')">MinimumRecommendedRules.ruleset</CodeAnalysisRuleSet> <CodeAnalysisRuleSet>MinimumRecommendedRules.ruleset</CodeAnalysisRuleSet>
<Prefer32Bit>false</Prefer32Bit> <Prefer32Bit>false</Prefer32Bit>
<AllowUnsafeBlocks>true</AllowUnsafeBlocks> <AllowUnsafeBlocks>true</AllowUnsafeBlocks>
</PropertyGroup> </PropertyGroup>
@@ -96,7 +96,7 @@
<PlatformTarget>x86</PlatformTarget> <PlatformTarget>x86</PlatformTarget>
<LangVersion>8.0</LangVersion> <LangVersion>8.0</LangVersion>
<ErrorReport>prompt</ErrorReport> <ErrorReport>prompt</ErrorReport>
<CodeAnalysisRuleSet Condition="Exists('MinimumRecommendedRules.ruleset')">MinimumRecommendedRules.ruleset</CodeAnalysisRuleSet> <CodeAnalysisRuleSet>MinimumRecommendedRules.ruleset</CodeAnalysisRuleSet>
<Prefer32Bit>false</Prefer32Bit> <Prefer32Bit>false</Prefer32Bit>
<AllowUnsafeBlocks>true</AllowUnsafeBlocks> <AllowUnsafeBlocks>true</AllowUnsafeBlocks>
</PropertyGroup> </PropertyGroup>
@@ -108,7 +108,7 @@
<PlatformTarget>x86</PlatformTarget> <PlatformTarget>x86</PlatformTarget>
<LangVersion>8.0</LangVersion> <LangVersion>8.0</LangVersion>
<ErrorReport>prompt</ErrorReport> <ErrorReport>prompt</ErrorReport>
<CodeAnalysisRuleSet Condition="Exists('MinimumRecommendedRules.ruleset')">MinimumRecommendedRules.ruleset</CodeAnalysisRuleSet> <CodeAnalysisRuleSet>MinimumRecommendedRules.ruleset</CodeAnalysisRuleSet>
<Prefer32Bit>false</Prefer32Bit> <Prefer32Bit>false</Prefer32Bit>
<AllowUnsafeBlocks>true</AllowUnsafeBlocks> <AllowUnsafeBlocks>true</AllowUnsafeBlocks>
</PropertyGroup> </PropertyGroup>

View File

@@ -1677,7 +1677,7 @@ if ($TimeStamp) { TimeElapsed }
Write-Host -ForegroundColor Blue "=========|| WHOAMI INFO" Write-Host -ForegroundColor Blue "=========|| WHOAMI INFO"
Write-Host "" Write-Host ""
if ($TimeStamp) { TimeElapsed } if ($TimeStamp) { TimeElapsed }
Write-Host -ForegroundColor Blue "=========|| Check Token access here: https://book.hacktricks.wiki/en/windows-hardening/windows-local-privilege-escalation/privilege-escalation-abusing-tokens.html#abusing-tokens" Write-Host -ForegroundColor Blue "=========|| Check Token access here: https://book.hacktricks.wiki/en/windows-hardening/windows-local-privilege-escalation/privilege-escalation-abusing-tokens.html#abusing-tokens" -ForegroundColor yellow
Write-Host -ForegroundColor Blue "=========|| Check if you are inside the Administrators group or if you have enabled any token that can be use to escalate privileges like SeImpersonatePrivilege, SeAssignPrimaryPrivilege, SeTcbPrivilege, SeBackupPrivilege, SeRestorePrivilege, SeCreateTokenPrivilege, SeLoadDriverPrivilege, SeTakeOwnershipPrivilege, SeDebugPrivilege" Write-Host -ForegroundColor Blue "=========|| Check if you are inside the Administrators group or if you have enabled any token that can be use to escalate privileges like SeImpersonatePrivilege, SeAssignPrimaryPrivilege, SeTcbPrivilege, SeBackupPrivilege, SeRestorePrivilege, SeCreateTokenPrivilege, SeLoadDriverPrivilege, SeTakeOwnershipPrivilege, SeDebugPrivilege"
Write-Host "https://book.hacktricks.wiki/en/windows-hardening/windows-local-privilege-escalation/index.html#users--groups" -ForegroundColor Yellow Write-Host "https://book.hacktricks.wiki/en/windows-hardening/windows-local-privilege-escalation/index.html#users--groups" -ForegroundColor Yellow
Start-Process whoami.exe -ArgumentList "/all" -Wait -NoNewWindow Start-Process whoami.exe -ArgumentList "/all" -Wait -NoNewWindow