裏・メモとか雑記とか♪の掲示板
-
16219
>>16218
# NOTE(review): continuation of parse_xbrl_data() — the `def` header is in a
# separate post below; indentation was stripped by the forum paste.
# Example: Extracting "Revenue" and "OperatingIncomeLoss" elements
revenue = root.find('.//ix:nonfraction[@name="Revenues"]', namespaces)
operating_income = root.find('.//ix:nonfraction[@name="OperatingIncomeLoss"]', namespaces)
# .text is the raw string content of the inline-XBRL fact; fall back to None
# when the element is absent so the result dict always has both keys.
results['Revenue'] = revenue.text if revenue is not None else None
results['OperatingIncome'] = operating_income.text if operating_income is not None else None
return results
if __name__ == "__main__":
    # Example CIK (Central Index Key) for a company
    cik = '0000320193'  # Replace with the CIK of the desired company
    xbrl_links = download_xbrl_data(cik)
    if xbrl_links:
        # Assumes the first link is the filing we want — TODO confirm ordering.
        xbrl_url = xbrl_links[0]
        xbrl_content = extract_xbrl_data(xbrl_url)
        # Example namespaces for XBRL elements
        namespaces = {
            'xbrl': 'http://www.xbrl.org/2003/instance',
            'ix': 'http://www.xbrl.org/2013/inlineXBRL',
            # Add more namespaces as needed
        }
        # BUG FIX: the original built `namespaces` but never used it —
        # parse_xbrl_data() was never called, so the extracted facts were
        # silently discarded. Parse and show the results.
        results = parse_xbrl_data(xbrl_content, namespaces)
        print(results)
カニだよカニ(蟹江天直) 1月15日 13:00
>>16217
# NOTE(review): tail of download_xbrl_data(cik) — the `def` header and the
# `response = requests.get(...)` call are outside this excerpt; indentation
# was stripped by the forum paste.
if response.status_code == 200:
soup = BeautifulSoup(response.content, 'xml')
entries = soup.find_all('entry')
# Keep only links whose MIME type mentions "xbrl".
# NOTE(review): `entry.link.href.text` treats href as a child tag; in an Atom
# feed href is normally an attribute (entry.link['href']) — verify this works
# against the actual feed.
xbrl_links = [entry.link.href.text for entry in entries if 'xbrl' in entry.link.type.text.lower()]
return xbrl_links
else:
# Best-effort: report the failure and return an empty list rather than raising.
print(f"Failed to fetch XBRL data. Status code: {response.status_code}")
return []
def extract_xbrl_data(xbrl_url):
    """Download a ZIP archive from *xbrl_url* and return the raw bytes of
    the first file inside it.

    Raises:
        requests.HTTPError: on a non-2xx HTTP response.
        zipfile.BadZipFile: if the payload is not a valid ZIP archive.
    """
    # Timeout prevents the script from hanging forever on a stalled server.
    response = requests.get(xbrl_url, timeout=30)
    # BUG FIX: the original fed response.content straight to ZipFile, so an
    # HTTP error page surfaced as a confusing BadZipFile; fail loudly instead.
    response.raise_for_status()
    with ZipFile(BytesIO(response.content)) as zip_file:
        # Assuming there is only one XBRL file in the ZIP archive
        xbrl_file = zip_file.namelist()[0]
        return zip_file.read(xbrl_file)
# Parse raw (inline-)XBRL bytes and extract selected facts into a dict.
# NOTE(review): this function continues in an earlier post (the root.find(...)
# lines and `return results`); indentation was stripped by the forum paste.
def parse_xbrl_data(xbrl_content, namespaces):
root = ET.fromstring(xbrl_content)
results = {}
# NOTE(review): ET.register_namespace affects serialization prefixes only —
# find() lookups use the `namespaces` mapping passed to them directly.
# This loop is likely unnecessary for parsing; confirm intent.
for key, value in namespaces.items():
ET.register_namespace(key, value)