# (1) This script batch-requests URLs during offensive/defensive engagements and
#     captures page screenshots to help triage potentially vulnerable assets.
# (2) After running, it creates a "screenshots" folder next to this .py file,
#     saves the screenshots there, and finally generates an HTML report for
#     quickly filtering possibly vulnerable assets.
# (The original article showed a screenshot of the script running here.)
import os
import re
import time
import random
import concurrent.futures
from tqdm import tqdm
from queue import PriorityQueue
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.chrome.options import Options
from webdriver_manager.chrome import ChromeDriverManager
# Shared headless-Chrome configuration reused by every worker thread.
chrome_options = Options()
chrome_options.add_argument("--headless")  # run without a visible browser window
chrome_options.add_argument("--no-sandbox")  # commonly required in root/container environments
chrome_options.add_argument("--disable-dev-shm-usage")  # avoid small /dev/shm issues (e.g. Docker)
def get_driver():
    """Return a fresh headless Chrome WebDriver (driver binary auto-managed)."""
    service = Service(ChromeDriverManager().install())
    return webdriver.Chrome(service=service, options=chrome_options)
# Output locations: a folder for the PNG files and an HTML report embedding them.
output_folder = "screenshots"
os.makedirs(output_folder, exist_ok=True)
html_file = "screenshots_report.html"
# Start the report with a fresh header and an open results table.
# (Rows are appended by capture_screenshot; the footer is written at the end.)
# NOTE: the original lines here were garbled by scraping (HTML tags stripped,
# two statements fused on one line); the table markup is reconstructed.
with open(html_file, "w", encoding="utf-8") as f:
    f.write("<html><head><meta charset='utf-8'>"
            "<title>URL Screenshot Report</title></head><body>\n")
    f.write("<h1>URL Screenshot Report</h1>\n")
    f.write("<table border='1'>\n")
    f.write("<tr><th>URL</th><th>Screenshot</th></tr>\n")
def generate_filename(url):
    """Build a unique PNG filename from a random 4-digit number and a timestamp.

    Note: *url* is accepted for interface compatibility but is not used in
    the generated name.
    """
    stamp = time.strftime("%Y%m%d-%H%M%S")
    suffix = random.randint(1000, 9999)
    return "screenshot_{}_{}.png".format(suffix, stamp)
def capture_screenshot(url, priority_queue):
    """Visit *url* in a headless browser, save a screenshot and record it.

    Appends a table row (linked URL + embedded image) to the HTML report and
    puts a ``(response_time, url)`` tuple onto *priority_queue* so callers can
    rank targets by responsiveness.

    Returns ``(url, file_path)`` on success. Browser/network exceptions
    propagate to the caller; the driver is always quit via ``finally``.

    NOTE: the original row-writing f-string was garbled by scraping (HTML tags
    stripped, unterminated literal); the markup below is reconstructed.
    """
    driver = get_driver()
    start_time = time.time()
    try:
        driver.get(url)
        print(f"正在访问 {url}...")
        file_name = generate_filename(url)
        file_path = os.path.join(output_folder, file_name)
        driver.save_screenshot(file_path)
        print(f"已保存截图:{file_path}")
        # NOTE(review): appended from multiple worker threads; short appends are
        # effectively atomic in CPython, but a shared lock would be safer.
        with open(html_file, "a", encoding="utf-8") as f:
            f.write(
                f"<tr><td><a href='{url}'>{url}</a></td>"
                f"<td><img src='{file_path}' width='400'></td></tr>\n"
            )
        response_time = time.time() - start_time
        priority_queue.put((response_time, url))
        return (url, file_path)
    finally:
        driver.quit()
def prioritize_and_process_urls(urls):
    """Screenshot every URL concurrently with a progress bar.

    Bug fix: the original drained the same ``PriorityQueue`` that finished
    workers re-populated with ``(response_time, url)`` results, so any URL
    that completed before the dispatcher loop exited was submitted again
    (duplicate screenshots, progress over-count, potentially unbounded work).
    Each URL is now submitted exactly once, and the progress bar advances as
    futures complete rather than as they are queued.
    """
    # Collects (response_time, url) tuples from workers; kept so the
    # capture_screenshot interface is unchanged.
    results = PriorityQueue()
    max_threads = 10
    with concurrent.futures.ThreadPoolExecutor(max_threads) as executor:
        futures = [
            executor.submit(capture_screenshot, url, results) for url in urls
        ]
        with tqdm(total=len(urls), desc="Processing URLs") as progress:
            for _ in concurrent.futures.as_completed(futures):
                progress.update(1)
# Read the target list (one URL per line, blanks skipped) and process it.
with open("targets.txt", "r", encoding="utf-8") as f:
    urls = [line.strip() for line in f if line.strip()]
prioritize_and_process_urls(urls)
# Close the table/body opened by the report header. The original closing
# write was garbled by scraping (HTML tags stripped); reconstructed here.
with open(html_file, "a", encoding="utf-8") as f:
    f.write("</table></body></html>\n")
print(f"报告已生成:{html_file}")
# Disclaimer (from the original article, 黑白之道): the techniques, ideas and
# tools covered in published/republished articles are for security-oriented
# learning and exchange only; they must not be used for illegal purposes or for
# profit — anyone doing so bears the consequences themselves.
# If this infringes your rights, contact us privately for removal.