-Kotix- · 10-Авг-24 01:28(1 год 3 месяца назад, ред. 03-Ноя-25 16:18)
Retro Sound / Game Music Collection - Arcade / Atari / NES / Sega / Sharp X68000 / SMS / SNES / C64 Collection (NSF, NSFE, VGM, SID) Жанр: Score Композитор: VA Год выпуска диска: 2024 Аудиокодек: NSF, NSFE, VGM, SID Тип рипа: tracks Источник: WEB Наличие сканов в содержимом раздачи: нет Сборник для ценителей классической игровой музыки на таких консолях как NES, Sega, SNES и др.
Расширенная версия NSF с дополнительными метаданными (название треков, композитор, длительность для каждого трека). Сборник NSFe_Collection by NikoTengoku (Niko) and Anonymotron (Lance) - April 26th, 2023 - самый актуальный сборник NSFE, заботливо перепроверенный и собранный руками NikoTengoku и Anonymotron. Источник: mega. MrNorbert1994 - один из немногих кто время от времени делает новые NSF. Его архив в раздачу включать не стал, т.к. слишком много всего и непонятно в каком состоянии.
Ветка обсуждения сборника на theshizz ссылка.
В стародавние времена я поучаствовал в обсуждении по стандартизации именования треков и сделал штук 10 NSFE
SPC (Super Nintendo Sound Format)
Формат для хранения музыки из игр на Super Nintendo Entertainment System (SNES).
Сборник SNESmusic.org.
Источник: snesmusic
VGM (Video Game Music)
Универсальный формат для хранения музыки из игр на различных консолях, поддерживает множество звуковых чипов.
Сборник Project2612 Complete Archive (2021-07-12)[704 sets]. Источник: archive.org
В этом сборнике музыка из Sega Genesis / Sega Mega Drive. Сборник VGMRips_all_of_them_2024-05-18. Источник: vgmrips
В этом сборнике всё подряд: Arcade, GameBoy, Mega Drive, NeoGeo, NeoGeo Pocket, NES, SegaPico, TurboGrafx, WonderSwan.
Источник: https://www.smspower.org/Music/VGMs Нет возможности скачать все файлы одним архивом с этого сайта (а где ещё можно скачать не нашёл), поэтому запилил python код для скачки:
Python код для скачки с https://www.smspower.org/Music/VGMs
Код:
import os
import re
import sys
from urllib.parse import urljoin, unquote

import requests
from bs4 import BeautifulSoup

# --- Configuration ---
BASE_URL = "https://www.smspower.org"
INDEX_URL = urljoin(BASE_URL, "/Music/VGMs")
DOWNLOAD_FOLDER = "sms_music_zips"


def sanitize_filename(filename):
    """Remove characters that are illegal in Windows filenames."""
    return re.sub(r'[\\/*?:"<>|]', "", filename)


def safe_print(message):
    """Print *message*, handling potential UnicodeEncodeErrors.

    Some consoles (notably the Windows default code page) cannot encode
    every character found in game titles; on failure the message is
    re-encoded with unencodable characters replaced instead of crashing.
    """
    try:
        print(message)
    except UnicodeEncodeError:
        print(message.encode(sys.stdout.encoding, errors='replace')
                     .decode(sys.stdout.encoding))


def get_soup(url):
    """Fetch *url* and return a BeautifulSoup object, or None on error."""
    try:
        response = requests.get(url, timeout=15)
        response.raise_for_status()  # raise for bad status codes (4xx or 5xx)
        return BeautifulSoup(response.text, 'html.parser')
    except requests.exceptions.RequestException as e:
        safe_print(f"Error fetching {url}: {e}")
        return None


def _find_composers(soup):
    """Return the list of composer names from a game page (may be empty).

    Composers are the 'wikilink' anchors inside the <dd> that follows the
    'Credits' <dt> in the page's definition list.
    """
    credits_dt = soup.find('dt', string='Credits')
    if not credits_dt:
        return []
    credits_dd = credits_dt.find_next_sibling('dd')
    if not credits_dd:
        return []
    return [link.text.strip()
            for link in credits_dd.find_all('a', class_='wikilink')]


def process_game_page(game_url):
    """Process a single game page: find composers and download its zip.

    The zip is saved into DOWNLOAD_FOLDER renamed to
    "<original name> by <composers><ext>"; files already on disk are
    skipped so the scraper can be re-run cheaply.
    """
    safe_print(f"\nProcessing game page: {game_url}")
    soup = get_soup(game_url)
    if not soup:
        return

    # --- Find Composers ---
    composers = _find_composers(soup)
    if not composers:
        composers_str = "Unknown"
        safe_print(" -> Could not find composers. Setting to 'Unknown'.")
    else:
        composers_str = ", ".join(composers)
        safe_print(f" -> Found composers: {composers_str}")

    # --- Find Download Link ---
    download_link = soup.find('a', class_='attachlink')
    if not download_link or not download_link.has_attr('href'):
        safe_print(" -> Could not find a valid download link. Skipping.")
        return
    zip_url = urljoin(BASE_URL, download_link['href'])
    safe_print(f" -> Found download link: {zip_url}")

    # --- Download and Rename File ---
    try:
        # Original filename from the URL, percent-decoded (e.g. %20 -> space).
        original_filename = os.path.basename(unquote(zip_url))
        base, extension = os.path.splitext(original_filename)

        # Build the new name and strip characters the filesystem rejects.
        sanitized_filename = sanitize_filename(f"{base} by {composers_str}") + extension

        # exist_ok avoids the check-then-create race of the previous version.
        if not os.path.isdir(DOWNLOAD_FOLDER):
            os.makedirs(DOWNLOAD_FOLDER, exist_ok=True)
            safe_print(f"Created download folder: {DOWNLOAD_FOLDER}")

        filepath = os.path.join(DOWNLOAD_FOLDER, sanitized_filename)

        # Check if the file already exists to avoid re-downloading.
        if os.path.exists(filepath):
            safe_print(f" -> File already exists: {sanitized_filename}. Skipping.")
            return

        safe_print(f" -> Downloading to: {filepath}")
        # Stream to disk so large zips are never held fully in memory.
        with requests.get(zip_url, timeout=30, stream=True) as zip_response:
            zip_response.raise_for_status()
            with open(filepath, 'wb') as f:
                for chunk in zip_response.iter_content(chunk_size=65536):
                    f.write(chunk)
        safe_print(" -> Download complete.")
    except requests.exceptions.RequestException as e:
        safe_print(f" -> Failed to download file: {e}")
    except IOError as e:
        safe_print(f" -> Failed to save file: {e}")


def main():
    """Orchestrate the scraping process over the whole index page."""
    safe_print(f"Starting scraper for: {INDEX_URL}")
    index_soup = get_soup(INDEX_URL)
    if not index_soup:
        safe_print("Could not fetch the main index page. Aborting.")
        return

    # Game links live inside elements with class 'fpltemplate'.
    game_links = index_soup.select('.fpltemplate a')
    if not game_links:
        safe_print("No game links found on the index page. Check the selector '.fpltemplate a'.")
        return
    safe_print(f"Found {len(game_links)} game links to process.")

    for link in game_links:
        if link.has_attr('href'):
            # Construct the full, absolute URL for the game page.
            process_game_page(urljoin(BASE_URL, link['href']))

    safe_print("\nScraping process finished.")


if __name__ == "__main__":
    main()
Python код для скачки с https://www.smspower.org/Music/Homebrew
Код:
import os
import re
from urllib.parse import urljoin, unquote

import requests
from bs4 import BeautifulSoup

# --- Configuration ---
BASE_URL = "https://www.smspower.org"
# Target URL for the Homebrew music page
INDEX_URL = urljoin(BASE_URL, "/Music/Homebrew")
# Download folder for the new content
DOWNLOAD_FOLDER = "sms_homebrew_music"

# Only rows linking to these file types are downloaded.
# BUG FIX: the previous pattern r'\.*' matched ANY href (zero or more
# literal dots), so every row was treated as a music download; this anchors
# on a real .vgm/.mp3 extension, as the surrounding comments intended.
MUSIC_FILE_RE = re.compile(r'\.(vgm|mp3)$', re.I)


def sanitize_filename(filename):
    """Remove characters that are illegal in many filesystems."""
    return re.sub(r'[\\/*?:"<>|]', "", filename)


def safe_print(text):
    """Print *text*, falling back to ASCII with replacements if Unicode fails."""
    try:
        print(text)
    except UnicodeEncodeError:
        # On some systems (like the Windows default console) printing Unicode
        # can fail; replace unsupported characters with '?'.
        print(text.encode('ascii', 'replace').decode('ascii'))


def get_soup(url):
    """Fetch *url* and return a BeautifulSoup object, or None on error."""
    try:
        response = requests.get(url, timeout=15)
        response.raise_for_status()  # raise for bad status codes
        return BeautifulSoup(response.text, 'html.parser')
    except requests.exceptions.RequestException as e:
        safe_print(f"Error fetching {url}: {e}")
        return None


def _extract_artist_and_title(cell):
    """Return (artist, title) scraped from the row's second cell."""
    artist_tags = cell.find_all('a', class_='forumuserlink')
    if artist_tags:
        artist = ", ".join(tag.text.strip() for tag in artist_tags)
    else:
        artist = "Unknown Artist"

    title = "Untitled"
    # The title sits in a span with an explicit font size inside this cell.
    title_span = cell.find('span', style=re.compile(r'font-size'))
    if title_span:
        title_link = title_span.find('a')
        if title_link:
            title = title_link.text.strip()
    return artist, title


def _extract_year_and_notes(cell):
    """Return (year, notes) scraped from the row's third cell."""
    year = "XXXX"  # default value if no year is found
    notes = ""
    cell_text = cell.get_text(separator=" ", strip=True)
    # Dates look like 2005/03/27; keep only the year.
    year_match = re.search(r'(\d{4})/\d{2}/\d{2}', cell_text)
    if year_match:
        year = year_match.group(1)
    # Notes look like "Based on <link>".
    if "Based on" in cell_text:
        based_on_link = cell.find('a', class_='wikilink')
        if based_on_link:
            notes = f"Based on {based_on_link.text.strip()}"
    return year, notes


def process_table_row(row_soup):
    """Process one table row (<tr>): parse metadata and download its file.

    Only rows whose fourth cell links a .vgm or .mp3 file are handled; the
    file is renamed to "Artist - Year - Title (Notes).ext" and saved into
    DOWNLOAD_FOLDER, skipping files that already exist.
    """
    # A valid data row has exactly 4 cells (td).
    cells = row_soup.find_all('td')
    if len(cells) != 4:
        return  # skip header, footer, or malformed rows

    # --- Cell 4: download link (.vgm or .mp3 only) ---
    download_link_tag = cells[3].find('a', href=MUSIC_FILE_RE)
    if not download_link_tag or not download_link_tag.has_attr('href'):
        return  # not a VGM or MP3 download row

    download_url = urljoin(BASE_URL, download_link_tag['href'])
    original_filename = os.path.basename(unquote(download_url))
    safe_print(f"\nProcessing file: {original_filename}")

    # --- Cells 2 & 3: metadata ---
    artist, title = _extract_artist_and_title(cells[1])
    year, notes = _extract_year_and_notes(cells[2])

    safe_print(f" -> Artist: {artist}, Year: {year}, Title: {title}")
    if notes:
        safe_print(f" -> Notes: {notes}")

    # --- Construct New Filename ---
    new_filename_base = " - ".join([artist, year, title])
    if notes:
        new_filename_base += f" ({notes})"
    # Keep the extension from the original URL.
    _, extension = os.path.splitext(original_filename)
    sanitized_filename = sanitize_filename(new_filename_base) + extension

    # --- Download and Save File ---
    try:
        # exist_ok avoids the check-then-create race of the previous version.
        if not os.path.isdir(DOWNLOAD_FOLDER):
            os.makedirs(DOWNLOAD_FOLDER, exist_ok=True)
            safe_print(f"Created download folder: {DOWNLOAD_FOLDER}")

        filepath = os.path.join(DOWNLOAD_FOLDER, sanitized_filename)
        if os.path.exists(filepath):
            safe_print(f" -> File already exists: {sanitized_filename}. Skipping.")
            return

        safe_print(f" -> Downloading to: {filepath}")
        # Stream to disk so large files are never held fully in memory.
        with requests.get(download_url, timeout=30, stream=True) as vgm_response:
            vgm_response.raise_for_status()
            with open(filepath, 'wb') as f:
                for chunk in vgm_response.iter_content(chunk_size=65536):
                    f.write(chunk)
        safe_print(" -> Download complete.")
    except requests.exceptions.RequestException as e:
        safe_print(f" -> Failed to download file: {e}")
    except IOError as e:
        safe_print(f" -> Failed to save file: {e}")


def main():
    """Orchestrate the scraping process over the Homebrew index page."""
    safe_print(f"Starting scraper for: {INDEX_URL}")
    index_soup = get_soup(INDEX_URL)
    if not index_soup:
        safe_print("Could not fetch the main index page. Aborting.")
        return

    # Find the main content div, then all fpltemplate containers within it.
    wikitext_div = index_soup.find('div', id='wikitext')
    if not wikitext_div:
        safe_print("Could not find the main content container (div#wikitext). Aborting.")
        return

    music_containers = wikitext_div.find_all('div', class_='fpltemplate')
    if not music_containers:
        safe_print("Could not find any music containers (div.fpltemplate) on the page. Aborting.")
        return

    all_rows = []
    for container in music_containers:
        music_table = container.find('table')
        if music_table:
            all_rows.extend(music_table.find_all('tr'))

    if not all_rows:
        safe_print("No table rows found in any of the music tables.")
        return
    safe_print(f"Found {len(all_rows)} total table rows to check for music files.")

    for row in all_rows:
        process_table_row(row)

    safe_print("\nScraping process finished.")


if __name__ == "__main__":
    main()
Как минимум еще можно добавить:
Сборник Game Boy музыки: http://www.snesmusic.org/pmh/ (нет возможности скачать целиком) Пишите какие ещё сборники стоит добавить, любая помощь приветствуется.
Знаю, что название раздачи Retro Sound Collection очень общее, лучше придумать не смог. Чем это можно воспроизвести?
Информация о плеерах на Chipwiki и Zophar.net
Неплохо бы ещё сюда добавить sndh от Atari ST.
Ну и спека, CPC и Atari 800\XL до кучи, правда где это всё брать я уже не могу сказать. Лично я всё это добро с модленда драл цать лет назад по форматам, а не платформам.
88327515Но там какая-то мешанина всего и вся, долго разбираться что там есть ценного
Там для hoot'а, а hoot чуть ли не единственный плеер для PC-88/98, ну и бонусом там fm7, x1, x68k, плюс ещё куча всего, но оно уже там не нужно.
Раньше по http://snesmusic.org/hoot/v2/ была огроменная коллекция оформленная хоть и не ахти как удобно, но с названиями контор/игр и прочего, а теперь всё снесли. Осталось вот у джоша только на отвали, хрен разберёшь
Обновил хут, слухаю тут киберблок - https://www.youtube.com/watch?v=SFKi02h4q4A
До кучи выкачал весь его архив с x68k. Кстати, затестил KbMedia Player, который для Sharp X68000. Так вот, куда сэмплы кидать\прописать, а то ударников нету?