# Source: User:DreamRimmer/commonsfileusage.py

"""

Copyright (c) 2025 DreamRimmer

Permission is hereby granted, free of charge, to any person obtaining a copy

of this software and associated documentation files (the "Software"), to deal

in the Software without restriction, including without limitation the rights

to use, copy, modify, merge, publish, distribute, sublicense, and/or sell

copies of the Software, and to permit persons to whom the Software is

furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all

copies or substantial portions of the Software.

"""

import json
import time

import requests

# Identify this client per the Wikimedia User-Agent policy (operator + contact).
headers = {"User-Agent": "User:DreamRimmer (email:dreamrimmer.wikimedian@gmail.com) :en:WP:AIIMAGE"}

# One shared session: connection pooling + the custom User-Agent on every request.
session = requests.Session()
session.headers.update(headers)

# 1. retrieve files from category

def get_files(cat_name, cont_token=None):
    """Fetch one page of members (files and subcategories) of a Commons category.

    Args:
        cat_name: Full category title, e.g. "Category:AI-generated media".
        cont_token: ``cmcontinue`` token from a previous response, or None
            to request the first page.

    Returns:
        The decoded API JSON response as a dict, or None if the body was
        not valid JSON (e.g. an HTML error/maintenance page).
    """
    url = "https://commons.wikimedia.org/w/api.php"
    params = {
        "action": "query",
        "list": "categorymembers",
        "cmtitle": cat_name,
        "cmtype": "file|subcat",
        "cmlimit": "max",
        "format": "json",
        "maxlag": 5,  # respect api: back off when DB replication lag is high
    }
    if cont_token:
        params["cmcontinue"] = cont_token
    # Timeout so a stalled connection cannot hang the crawl indefinitely.
    response = session.get(url, params=params, timeout=30)
    try:
        return response.json()
    except json.JSONDecodeError:
        # Caller treats None as "stop paging this category".
        return None

# 2. usage data for the files

def get_usage(file_names):
    """Query enwiki mainspace global usage for a batch of Commons file titles.

    Args:
        file_names: Iterable of "File:..." titles; the API accepts at most
            50 titles per request, so callers should batch accordingly.

    Returns:
        The decoded API JSON response (``formatversion=2``) as a dict, or
        None if the body was not valid JSON.
    """
    url = "https://commons.wikimedia.org/w/api.php"
    params = {
        "action": "query",
        "format": "json",
        "prop": "globalusage",
        "titles": "|".join(file_names),
        "formatversion": "2",
        "guprop": "url|namespace",
        "gunamespace": "0",   # mainspace (articles) only
        "gusite": "enwiki",   # English Wikipedia only
        "maxlag": 5,          # respect api
    }
    # Timeout so a stalled connection cannot hang the crawl indefinitely.
    response = session.get(url, params=params, timeout=30)
    try:
        return response.json()
    except json.JSONDecodeError:
        # Caller treats None as "skip this batch".
        return None

def process_cat(cat_name, processed_files=None, output_file=None):
    """Recursively report enwiki article usage of every file in a category.

    Walks ``cat_name`` (following API continuation), looks up global usage
    for its files in batches of 50, prints one wiki-list line per usage,
    and recurses into each subcategory.

    Args:
        cat_name: Full category title to process.
        processed_files: Set of file titles already handled; shared across
            recursive calls so a file in several subcategories is reported
            once. Created fresh when None.
        output_file: Open writable text file to mirror the printed lines
            into, or None to only print.
    """
    if processed_files is None:
        processed_files = set()
    cont_token = None
    while True:
        data = get_files(cat_name, cont_token)
        if data is None:
            break  # API returned non-JSON; stop paging this category
        members = data.get("query", {}).get("categorymembers", [])
        # ns 6 = File, ns 14 = Category.
        files = [member["title"] for member in members
                 if member["ns"] == 6 and member["title"] not in processed_files]
        subcats = [member["title"] for member in members if member["ns"] == 14]
        for i in range(0, len(files), 50):  # process files in batches of 50 (API title limit)
            batch = files[i:i + 50]
            usage_data = get_usage(batch)
            if usage_data is None:
                continue  # skip this batch; its files stay unprocessed
            pages = usage_data.get('query', {}).get('pages', [])
            for page in pages:
                if 'globalusage' in page:
                    for usage in page['globalusage']:
                        result = f"* :{page['title']} - {usage['title'].replace('_', ' ')}"
                        print(result)
                        if output_file:
                            output_file.write(result + "\n")
            processed_files.update(batch)
            time.sleep(5)  # respect api
        for subcat in subcats:
            process_cat(subcat, processed_files, output_file)
        if "continue" in data:
            cont_token = data["continue"]["cmcontinue"]
        else:
            break

cat_name = "Category:AI-generated media"  # category to check

# 3. save output: mirror every printed usage line into files.txt
with open("files.txt", "w", encoding="utf-8") as output_file:
    process_cat(cat_name, output_file=output_file)