Mirror of https://github.com/carlospolop/privilege-escalation-awesome-scripts-suite.git (synced 2025-12-07 17:41:29 +00:00)
Compare commits (1 commit): 20220207
| Author | SHA1 | Date |
|---|---|---|
| | 89b62c4e3b | |
.github/workflows/CI-master_tests.yml (vendored, 11 changed lines)
@@ -4,9 +4,6 @@ on:
  pull_request:
    branches:
      - master

  schedule:
    - cron: "5 4 * * SUN"

  workflow_dispatch:

@@ -365,10 +362,6 @@ jobs:
        with:
          name: linpeas_darwin_arm64

      - name: Get current date
        id: date
        run: echo "::set-output name=date::$(date +'%Y%m%d')"

      # Create the release
      - name: Create Release
        id: create_release
@@ -376,8 +369,8 @@ jobs:
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        with:
          tag_name: ${{steps.date.outputs.date}}
          release_name: Release ${{ github.ref }} ${{steps.date.outputs.date}}
          tag_name: ${{ github.ref }}
          release_name: Release ${{ github.ref }}
          draft: false
          prerelease: false

@@ -21,9 +21,6 @@ These tools search for possible **local privilege escalation paths** that you co
## Quick Start
Find the **latest versions of all the scripts and binaries in [the releases page](https://github.com/carlospolop/PEASS-ng/releases/latest)**.

## JSON, HTML & PDF output
Check the **[parsers](./parsers/)** directory to **transform PEASS outputs to JSON, HTML and PDF**

## Let's improve PEASS together

If you want to **add something** and have **any cool idea** related to this project, please let me know it in the **telegram group https://t.me/peass** or contribute reading the **[CONTRIBUTING.md](https://github.com/carlospolop/privilege-escalation-awesome-scripts-suite/blob/master/CONTRIBUTING.md)** file.

@@ -21,17 +21,6 @@ else echo_not_found "sudo"
fi
echo ""

#-- SY) CVE-2021-4034
if [ `command -v pkexec` ] && stat -c '%a' $(which pkexec) | grep -q 4755 && [ "$(stat -c '%Y' $(which pkexec))" -lt "1642035600" ]; then
  echo "Vulnerable to CVE-2021-4034" | sed -${E} "s,.*,${SED_RED_YELLOW},"
fi

#-- SY) CVE-2021-3560
polkitVersion=$(systemctl status polkit.service | grep version | cut -d " " -f 9)
if [[ "$(apt list --installed 2>/dev/null | grep polkit | grep -c 0.105-26)" -ge 1 || "$(yum list installed | grep polkit | grep -c 0.117-2)" ]]; then
  echo "Vulnerable to CVE-2021-3560" | sed -${E} "s,.*,${SED_RED_YELLOW},"
fi

#--SY) USBCreator
if (busctl list 2>/dev/null | grep -q com.ubuntu.USBCreator) || [ "$DEBUG" ]; then
  print_2title "USBCreator"

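For readers unfamiliar with the check above: the script flags CVE-2021-4034 (PwnKit) when pkexec is setuid with mode 4755 and its modification time is older than epoch 1642035600 (roughly 2022-01-13 UTC), i.e. the binary likely predates the patched builds. A minimal Python sketch of the same heuristic, assuming only that pkexec is resolvable on PATH; it illustrates the shell logic above and is not part of linpeas:

```python
#!/usr/bin/env python3
# Illustrative re-implementation of the CVE-2021-4034 heuristic shown above.
# Assumption: pkexec is on PATH; 1642035600 is the cut-off used by the shell check.
import os
import shutil
import stat

PKEXEC_PATCH_EPOCH = 1642035600  # 2022-01-13 UTC, taken from the shell check above

def pkexec_looks_vulnerable() -> bool:
    path = shutil.which("pkexec")
    if not path:
        return False
    st = os.stat(path)
    is_mode_4755 = stat.S_IMODE(st.st_mode) == 0o4755   # setuid bit plus rwxr-xr-x
    predates_patch = st.st_mtime < PKEXEC_PATCH_EPOCH   # binary older than the cut-off
    return is_mode_4755 and predates_patch

if __name__ == "__main__":
    if pkexec_looks_vulnerable():
        print("Possibly vulnerable to CVE-2021-4034")
```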
@@ -37,7 +37,7 @@ class MetasploitModule < Msf::Post
      ))
      register_options(
        [
          OptString.new('PEASS_URL', [true, 'Path to the PEASS script. Accepted: http(s):// URL or absolute local path. Linpeas: https://github.com/carlospolop/PEASS-ng/releases/latest/download/linpeas.sh', "https://github.com/carlospolop/PEASS-ng/releases/latest/download/winPEASany_ofs.exe"]),
          OptString.new('PEASS_URL', [true, 'Path to the PEASS script. Accepted: http(s):// URL or absolute local path. Linpeas: https://raw.githubusercontent.com/carlospolop/PEASS-ng/master/linPEAS/linpeas.sh', "https://raw.githubusercontent.com/carlospolop/PEASS-ng/master/winPEAS/winPEASexe/binaries/Obfuscated%20Releases/winPEASany.exe"]),
          OptString.new('PASSWORD', [false, 'Password to encrypt and obfuscate the script (randomly generated). The length must be 32B. If no password is set, only base64 will be used.', rand(36**32).to_s(36)]),
          OptString.new('TEMP_DIR', [false, 'Path to upload the obfuscated PEASS script inside the compromised machine. By default "C:\Windows\System32\spool\drivers\color" is used in Windows and "/tmp" in Unix.', '']),
          OptString.new('PARAMETERS', [false, 'Parameters to pass to the script', nil]),

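The PASSWORD option above defaults to rand(36**32).to_s(36) in Ruby, i.e. a random base-36 string of up to 32 characters that the module uses to encrypt and obfuscate the uploaded PEASS script. A rough Python equivalent, shown only for illustration (the helper name is invented and it always produces exactly 32 characters):

```python
# Hypothetical helper: generate a 32-character base-36 password, similar in spirit
# to the Ruby default rand(36**32).to_s(36) used by the Metasploit module above.
import secrets
import string

BASE36_ALPHABET = string.digits + string.ascii_lowercase  # 0-9a-z

def random_base36_password(length: int = 32) -> str:
    return "".join(secrets.choice(BASE36_ALPHABET) for _ in range(length))

print(random_base36_password())  # e.g. a 32-character string such as 'k3v0...'
```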
@@ -1,15 +1,14 @@
# Privilege Escalation Awesome Scripts Parsers
# Privilege Escalation Awesome Scripts JSON exporter

These scripts allows you to transform the output of linpeas/macpeas/winpeas to JSON and then to PDF and HTML.
This script allows you to transform the output of linpeas/macpeas/winpeas to JSON.

```python3
python3 peass2json.py </path/to/executed_peass.out> </path/to/peass.json>
python3 json2pdf.py </path/to/peass.json> </path/to/peass.pdf>
python3 json2html.py </path/to/peass.json> </path/to/peass.html>
python3 peass-parser.py </path/to/executed_peass> </path/to/output_peass.json>
```

This script is still in beta version and has been tested only with linpeas output.

## JSON Format
## Format
Basically, **each section has**:
- Infos (URLs or info about the section)
- Text lines (the real text info found in the section, colors included)
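To make the described format concrete, here is a minimal sketch of what one parsed section might look like, based on the sections/lines/infos structure and the per-line raw_text/clean_text/colors keys built by the parser shown further down; the section names, URL and text are invented for illustration:

```python
# Hypothetical example of the JSON structure produced for one section.
# Keys mirror the parser below; the concrete values are made up.
import json

example = {
    "System Information": {                      # top-level (TITLE1) section
        "infos": ["https://example.com/section-info"],
        "lines": [],
        "sections": {
            "Sudo version": {                    # TITLE2 sub-section
                "infos": [],
                "sections": {},
                "lines": [
                    {
                        "raw_text": "\x1b[1;31mSudo version 1.8.31\x1b[0m",
                        "clean_text": "Sudo version 1.8.31",
                        "colors": {"RED": ["Sudo version 1.8.31"]},
                    }
                ],
            }
        },
    }
}

print(json.dumps(example, indent=2))
```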
@@ -76,4 +75,4 @@ There can also be a `<Third level Section Name>`

# TODO:

- **PRs improving the code and the aspect of the final PDFs and HTMLs are always welcome!**
I'm looking for **someone that could create HTML and PDF reports** from this JSON.
@@ -5,7 +5,7 @@ import re
import json

# Pattern to identify main section titles
TITLE1_PATTERN = r"══════════════╣" # The size of the first pattern varies, but at least should be that large
TITLE1_PATTERN = r"════════════════════════════════════╣"
TITLE2_PATTERN = r"╔══════════╣"
TITLE3_PATTERN = r"══╣"
INFO_PATTERN = r"╚ "
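These box-drawing patterns are what the parser matches against each raw output line to decide its nesting level (main section, sub-section, third-level section, or info line). The is_section helper referenced later is not part of this diff; a minimal hypothetical sketch, assuming it simply checks whether the pattern appears in the line once ANSI escapes are removed:

```python
# Hypothetical sketch of is_section (the real function is not shown in this diff).
# Assumption: a line is a section header if it contains the given box-drawing
# pattern after ANSI colour escapes have been stripped.
import re

ANSI_RE = re.compile(r"\x1b\[[^a-zA-Z]*[a-zA-Z]")

def is_section(line: str, pattern: str) -> bool:
    return pattern in ANSI_RE.sub("", line)

# e.g. is_section(raw_line, TITLE2_PATTERN) would be True for a "╔═...═╣ Sudo version" header line
```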
@@ -14,15 +14,15 @@ TITLE_CHARS = ['═', '╔', '╣', '╚']
# Patterns for colors
## The order is important, the first string colored with a color will be the one selected (the same string cannot be colored with different colors)
COLORS = {
    "REDYELLOW": ['\x1b[1;31;103m'],
    "RED": ['\x1b[1;31m'],
    "GREEN": ['\x1b[1;32m'],
    "YELLOW": ['\x1b[1;33m'],
    "BLUE": ['\x1b[1;34m'],
    "MAGENTA": ['\x1b[1;95m', '\x1b[1;35m'],
    "CYAN": ['\x1b[1;36m', '\x1b[1;96m'],
    "LIGHT_GREY": ['\x1b[1;37m'],
    "DARKGREY": ['\x1b[1;90m'],
    "REDYELLOW": [r"\x1b\[1;31;103m"],
    "RED": [r"\x1b\[1;31m"],
    "GREEN": [r"\x1b\[1;32m"],
    "YELLOW": [r"\x1b\[1;33m"],
    "BLUE": [r"\x1b\[1;34m"],
    "MAGENTA": [r"\x1b\[1;95m", r"\x1b\[1;35m"],
    "CYAN": [r"\x1b\[1;36m", r"\x1b\[1;96m"],
    "LIGHT_GREY": [r"\x1b\[1;37m"],
    "DARKGREY": [r"\x1b\[1;90m"],
}

@@ -52,23 +52,11 @@ def get_colors(line: str) -> dict:
    for c,regexs in COLORS.items():
        colors[c] = []
        for reg in regexs:
            split_color = line.split(reg)

            # Start from the index 1 as the index 0 isn't colored
            if split_color and len(split_color) > 1:
                split_color = split_color[1:]

                # For each potential color, find the string before any possible color termination
                for potential_color_str in split_color:
                    color_str1 = potential_color_str.split('\x1b')[0]
                    color_str2 = potential_color_str.split("\[0")[0]
                    color_str = color_str1 if len(color_str1) < len(color_str2) else color_str2

                    if color_str:
                        color_str = clean_colors(color_str.strip())
                        #Avoid having the same color for the same string
                        if color_str and not any(color_str in values for values in colors.values()):
                            colors[c].append(color_str)
            for re_found in re.findall(reg+"(.+?)\x1b|$", line):
                re_found = clean_colors(re_found.strip())
                #Avoid having the same color for the same string
                if re_found and not any(re_found in values for values in colors.values()):
                    colors[c].append(re_found)

        if not colors[c]:
            del colors[c]
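The re.findall(reg + "(.+?)\x1b|$", line) form above captures every substring sitting between a given colour escape and the next escape sequence; the trailing |$ alternative only makes the pattern match (and return empty strings) on lines without that colour, and those empties are filtered out by the if re_found check. A small stand-alone illustration using the RED escape from the COLORS table and an invented line:

```python
# Stand-alone illustration of the colour-extraction regex used above.
# The sample line is invented; the escape sequence is the RED entry from COLORS.
import re

reg = r"\x1b\[1;31m"                                # RED
line = "Interesting file: \x1b[1;31m/etc/shadow\x1b[0m found"

matches = [m for m in re.findall(reg + "(.+?)\x1b|$", line) if m]
print(matches)  # ['/etc/shadow']
```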
@@ -87,10 +75,10 @@ def clean_title(line: str) -> str:
def clean_colors(line: str) -> str:
    """Given a line clean the colors inside of it"""

    for reg in re.findall(r'\x1b\[[^a-zA-Z]+\dm', line):
    for reg in re.findall(r'\x1b[^ ]+\dm', line):
        line = line.replace(reg,"")

    line = line.replace('\x1b',"").replace("[0m", "").replace("[3m", "") #Sometimes that byte stays
    line = line.replace('\x1b',"") #Sometimes that byte stays
    line = line.strip()
    return line

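Both regex variants above strip ANSI colour escapes by locating each complete escape sequence and deleting it, while the follow-up replace() calls mop up leftovers such as a bare \x1b or a trailing [0m that the regex does not cover. A quick stand-alone check of the first variant on an invented linpeas-style line:

```python
# Quick demonstration of the ANSI-stripping idea used by clean_colors above.
# The input line is invented; the regex is the r'\x1b\[[^a-zA-Z]+\dm' variant.
import re

line = "\x1b[1;33mSudo version\x1b[0m 1.8.31"
for esc in re.findall(r'\x1b\[[^a-zA-Z]+\dm', line):
    line = line.replace(esc, "")
line = line.replace('\x1b', "").replace("[0m", "").replace("[3m", "")
print(line.strip())  # Sudo version 1.8.31
```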
@@ -106,9 +94,6 @@ def parse_line(line: str):

    global FINAL_JSON, C_SECTION, C_MAIN_SECTION, C_2_SECTION, C_3_SECTION

    if "Cron jobs" in line:
        a=1

    if is_section(line, TITLE1_PATTERN):
        title = parse_title(line)
        FINAL_JSON[title] = { "sections": {}, "lines": [], "infos": [] }
@@ -139,8 +124,8 @@ def parse_line(line: str):

    C_SECTION["lines"].append({
        "raw_text": line,
        "colors": get_colors(line),
        "clean_text": clean_title(clean_colors(line))
        "clean_text": clean_colors(line),
        "colors": get_colors(line)
    })

@@ -162,7 +147,7 @@ if __name__ == "__main__":
        OUTPUT_PATH = sys.argv[1]
        JSON_PATH = sys.argv[2]
    except IndexError as err:
        print("Error: Please pass the peas.out file and the path to save the json\npeas2json.py <output_file> <json_file.json>")
        print("Error: Please pass the peas.out file and the path to save the json\n./peas-parser.py <output_file> <json_file.json>")
        sys.exit(1)

    main()
File diff suppressed because one or more lines are too long
@@ -1,162 +0,0 @@
#!/usr/bin/env python3
import sys
import json
import html
from reportlab.lib.pagesizes import letter
from reportlab.platypus import Frame, Paragraph, Spacer, PageBreak,PageTemplate, BaseDocTemplate
from reportlab.platypus.tableofcontents import TableOfContents
from reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle
from reportlab.lib.units import cm

styles = getSampleStyleSheet()
text_colors = { "GREEN": "#00DB00", "RED": "#FF0000", "REDYELLOW": "#FFA500", "BLUE": "#0000FF",
    "DARKGREY": "#5C5C5C", "YELLOW": "#ebeb21", "MAGENTA": "#FF00FF", "CYAN": "#00FFFF", "LIGHT_GREY": "#A6A6A6"}

# Required to automatically set Page Numbers
class PageTemplateWithCount(PageTemplate):
    def __init__(self, id, frames, **kw):
        PageTemplate.__init__(self, id, frames, **kw)

    def beforeDrawPage(self, canvas, doc):
        page_num = canvas.getPageNumber()
        canvas.drawRightString(10.5*cm, 1*cm, str(page_num))

# Required to automatically set the Table of Contents
class MyDocTemplate(BaseDocTemplate):
    def __init__(self, filename, **kw):
        self.allowSplitting = 0
        BaseDocTemplate.__init__(self, filename, **kw)
        template = PageTemplateWithCount("normal", [Frame(2.5*cm, 2.5*cm, 15*cm, 25*cm, id='F1')])
        self.addPageTemplates(template)

    def afterFlowable(self, flowable):
        if flowable.__class__.__name__ == "Paragraph":
            text = flowable.getPlainText()
            style = flowable.style.name
            if style == "Heading1":
                self.notify("TOCEntry", (0, text, self.page))
            if style == "Heading2":
                self.notify("TOCEntry", (1, text, self.page))
            if style == "Heading3":
                self.notify("TOCEntry", (2, text, self.page))


# Poor take at dynamicly generating styles depending on depth(?)
def get_level_styles(level):
    global styles
    indent_value = 10 * (level - 1);
    # Overriding some default stylings
    level_styles = {
        "title": ParagraphStyle(
            **dict(styles[f"Heading{level}"].__dict__,
            **{ "leftIndent": indent_value })),
        "text": ParagraphStyle(
            **dict(styles["Code"].__dict__,
            **{ "backColor": "#F0F0F0",
            "borderPadding": 5, "borderWidth": 1,
            "borderColor": "black", "borderRadius": 5,
            "leftIndent": 5 + indent_value})),
        "info": ParagraphStyle(
            **dict(styles["Italic"].__dict__,
            **{ "leftIndent": indent_value })),
    }
    return level_styles

def get_colors_by_text(colors):
    new_colors = {}
    for (color, words) in colors.items():
        for word in words:
            new_colors[html.escape(word)] = color
    return new_colors

def build_main_section(section, title, level=1):
    styles = get_level_styles(level)
    has_links = "infos" in section.keys() and len(section["infos"]) > 0
    has_lines = "lines" in section.keys() and len(section["lines"]) > 1
    has_children = "sections" in section.keys() and len(section["sections"].keys()) > 0

    # Only display data for Sections with results
    show_section = has_lines or has_children

    elements = []

    if show_section:
        elements.append(Paragraph(title, style=styles["title"]))

    # Print info if any
    if show_section and has_links:
        for info in section["infos"]:
            words = info.split()
            # Join all lines and encode any links that might be present.
            words = map(lambda word: f'<a href="{word}" color="blue">{word}</a>' if "http" in word else word, words)
            words = " ".join(words)
            elements.append(Paragraph(words, style=styles["info"] ))

    # Print lines if any
    if "lines" in section.keys() and len(section["lines"]) > 1:
        colors_by_line = list(map(lambda x: x["colors"], section["lines"]))
        lines = list(map(lambda x: html.escape(x["clean_text"]), section["lines"]))
        for (idx, line) in enumerate(lines):
            colors = colors_by_line[idx]
            colored_text = get_colors_by_text(colors)
            colored_line = line
            for (text, color) in colored_text.items():
                if color == "REDYELLOW":
                    colored_line = colored_line.replace(text, f'<font color="{text_colors[color]}"><b>{text}</b></font>')
                else:
                    colored_line = colored_line.replace(text, f'<font color="{text_colors[color]}">{text}</font>')
            lines[idx] = colored_line
        elements.append(Spacer(0, 10))
        line = "<br/>".join(lines)

        # If it's a top level entry remove the line break caused by an empty "clean_text"
        if level == 1: line = line[5:]
        elements.append(Paragraph(line, style=styles["text"]))


    # Print child sections
    if has_children:
        for child_title in section["sections"].keys():
            element_list = build_main_section(section["sections"][child_title], child_title, level + 1)
            elements.extend(element_list)

    # Add spacing at the end of section. The deeper the level the smaller the spacing.
    if show_section:
        elements.append(Spacer(1, 40 - (10 * level)))

    return elements


def main():
    with open(JSON_PATH) as file:
        # Read and parse JSON file
        data = json.loads(file.read())

    # Default pdf values
    doc = MyDocTemplate(PDF_PATH)
    toc = TableOfContents()
    toc.levelStyles = [
        ParagraphStyle(name = "Heading1", fontSize = 14, leading=16),
        ParagraphStyle(name = "Heading2", fontSize = 12, leading=14, leftIndent = 10),
        ParagraphStyle(name = "Heading3", fontSize = 10, leading=12, leftIndent = 20),
    ]

    elements = [Paragraph("PEAS Report", style=styles["Title"]), Spacer(0, 30), toc, PageBreak()]

    # Iterate over all top level sections and build their elements.
    for title in data.keys():
        element_list = build_main_section(data[title], title)
        elements.extend(element_list)

    doc.multiBuild(elements)

# Start execution
if __name__ == "__main__":
    try:
        JSON_PATH = sys.argv[1]
        PDF_PATH = sys.argv[2]
    except IndexError as err:
        print("Error: Please pass the peas.json file and the path to save the pdf\njson2pdf.py <json_file> <pdf_file.pdf>")
        sys.exit(1)

    main()