Merge pull request #1199 from yarysp/fix-recursive-ffuf-launch
Fix various ffuf bugs
AnonymousWP authored Feb 21, 2024
2 parents f362189 + ec21cbe commit aff8040
Showing 2 changed files with 58 additions and 14 deletions.
17 changes: 17 additions & 0 deletions web/reNgine/common_func.py
@@ -458,6 +458,23 @@ def sanitize_url(http_url):
        url = url._replace(netloc=url.netloc.replace(':443', ''))
    return url.geturl().rstrip('/')

def extract_path_from_url(url):
    parsed_url = urlparse(url)

    # Reconstruct the URL without scheme and netloc
    reconstructed_url = parsed_url.path

    if reconstructed_url.startswith('/'):
        reconstructed_url = reconstructed_url[1:]  # Remove the first slash

    if parsed_url.params:
        reconstructed_url += ';' + parsed_url.params
    if parsed_url.query:
        reconstructed_url += '?' + parsed_url.query
    if parsed_url.fragment:
        reconstructed_url += '#' + parsed_url.fragment

    return reconstructed_url

#-------#
# Utils #
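For reference, the new helper keeps only the path, params, query and fragment of a URL and drops the scheme, host and leading slash, which is what the ffuf FUZZ position maps to. A minimal usage sketch (illustrative only, not part of the commit; the import path is assumed):

# Illustrative check of the new helper; assumes reNgine's Django project root (web/) is on sys.path
from reNgine.common_func import extract_path_from_url

print(extract_path_from_url('https://example.com/admin/login.php?next=/dashboard'))
# -> 'admin/login.php?next=/dashboard'
print(extract_path_from_url('https://example.com/'))
# -> '' (an empty string once the leading slash is stripped)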
55 changes: 41 additions & 14 deletions web/reNgine/tasks.py
@@ -10,6 +10,7 @@
import yaml
import tldextract
import concurrent.futures
import base64

from datetime import datetime
from urllib.parse import urlparse
@@ -1662,49 +1663,75 @@ def dir_file_fuzz(self, ctx={}, description=None):
            history_file=self.history_file,
            scan_id=self.scan_id,
            activity_id=self.activity_id):

        # Empty line, continue to the next record
        if not isinstance(line, dict):
            continue

        # Append line to results
        results.append(line)
        name = line['input'].get('FUZZ')

        # Retrieve FFUF output
        url = line['url']
        # Extract path and convert to base64 (need byte string encode & decode)
        name = base64.b64encode(extract_path_from_url(url).encode()).decode()
        length = line['length']
        status = line['status']
        words = line['words']
        url = line['url']
        lines = line['lines']
        content_type = line['content-type']
        duration = line['duration']

        # If name empty log error and continue
        if not name:
            logger.error(f'FUZZ not found for "{url}"')
            continue

        # Get or create endpoint from URL
        endpoint, created = save_endpoint(url, crawl=False, ctx=ctx)

        # Continue to next line if endpoint returned is None
        if endpoint == None:
            continue

        # Save endpoint data from FFUF output
        endpoint.http_status = status
        endpoint.content_length = length
        endpoint.response_time = duration / 1000000000
        endpoint.save()
        if created:
            urls.append(endpoint.http_url)
        endpoint.status = status
        endpoint.content_type = content_type
        endpoint.content_length = length
        endpoint.save()

        # Save directory file output from FFUF output
        dfile, created = DirectoryFile.objects.get_or_create(
            name=name,
            length=length,
            words=words,
            lines=lines,
            content_type=content_type,
            url=url)
        dfile.http_status = status
        dfile.save()
        # if created:
        #     logger.warning(f'Found new directory or file {url}')
            url=url,
            http_status=status)

        # Log newly created file or directory if debug activated
        if created and DEBUG:
            logger.warning(f'Found new directory or file {url}')

        # Add file to current dirscan
        dirscan.directory_files.add(dfile)
        dirscan.save()

        # Add subscan relation to dirscan if exists
        if self.subscan:
            dirscan.dir_subscan_ids.add(self.subscan)

        subdomain_name = get_subdomain_from_url(endpoint.http_url)
        subdomain = Subdomain.objects.get(name=subdomain_name, scan_history=self.scan)
        # Save dirscan datas
        dirscan.save()

        # Get subdomain and add dirscan
        if ctx['subdomain_id'] > 0:
            subdomain = Subdomain.objects.get(id=ctx['subdomain_id'])
        else:
            subdomain_name = get_subdomain_from_url(endpoint.http_url)
            subdomain = Subdomain.objects.get(name=subdomain_name, scan_history=self.scan)
        subdomain.directories.add(dirscan)
        subdomain.save()

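A note for anything that consumes this data: DirectoryFile.name now stores the fuzzed path base64-encoded, so it has to be decoded before display. A minimal round-trip sketch (illustrative only, not part of the commit; the import path is assumed):

import base64
from reNgine.common_func import extract_path_from_url  # assumed import path

url = 'https://example.com/backup/.env'
name = base64.b64encode(extract_path_from_url(url).encode()).decode()
# name == 'YmFja3VwLy5lbnY='  -- value stored in DirectoryFile.name
path = base64.b64decode(name).decode()
# path == 'backup/.env'       -- what a UI would show after decoding

Also worth noting: because http_status is now part of the get_or_create lookup rather than being set after the fact, the same path returning a different status code in a later scan produces a separate DirectoryFile record instead of overwriting the earlier one.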
