diff --git a/generate.py b/generate.py
index 44e202e..9751b83 100644
--- a/generate.py
+++ b/generate.py
@@ -1,13 +1,10 @@
 import torchaudio
-import time
 from audiocraft.models.musicgen import MusicGen
 from audiocraft.data.audio import audio_write
 
 MODEL_NAME = "facebook/musicgen-large"
 MUSIC_DURATION_SECONDS = 60
 
-print(f"getting {MODEL_NAME}...")
-
 model = MusicGen.get_pretrained(MODEL_NAME)
 model.set_generation_params(duration=MUSIC_DURATION_SECONDS)
 descriptions = [
@@ -15,25 +12,20 @@ descriptions = [
     "calm, piano lo-fi beats to help with studying and focusing",
     "gentle lo-fi hip-hop to relax to",
     "gentle, quiet synthwave lo-fi beats",
-    "morning lo-fi beats"
+    "morning lo-fi beats",
 ]
 
 
-print("model obtained. generating wav files...")
-a = time.time()
+def generate(offset=0):
+    wav = model.generate(descriptions)
 
-wav = model.generate(descriptions)
-
-b = time.time()
-
-print(f"{len(wav)} generated. took {b - a} seconds.")
-
-for idx, one_wav in enumerate(wav):
-    # Will save under {idx}.wav, with loudness normalization at -14 db LUFS.
-    audio_write(
-        f"{idx}",
-        one_wav.cpu(),
-        model.sample_rate,
-        strategy="loudness",
-        loudness_compressor=True,
-    )
+    for idx, one_wav in enumerate(wav):
+        # Will save under {idx + offset}.mp3, with loudness normalization at -14 dB LUFS.
+        audio_write(
+            f"{idx + offset}",
+            one_wav.cpu(),
+            model.sample_rate,
+            format="mp3",
+            strategy="loudness",
+            loudness_compressor=True,
+        )
diff --git a/generate_manual.py b/generate_manual.py
new file mode 100644
index 0000000..fd642cd
--- /dev/null
+++ b/generate_manual.py
@@ -0,0 +1,38 @@
+import torchaudio
+import time
+from audiocraft.models.musicgen import MusicGen
+from audiocraft.data.audio import audio_write
+
+MODEL_NAME = "facebook/musicgen-large"
+MUSIC_DURATION_SECONDS = 60
+
+print("obtaining model...")
+
+model = MusicGen.get_pretrained(MODEL_NAME)
+model.set_generation_params(duration=MUSIC_DURATION_SECONDS)
+descriptions = [
+    "gentle, calming lo-fi beats that helps with studying and focusing",
+    "calm, piano lo-fi beats to help with studying and focusing",
+    "gentle lo-fi hip-hop to relax to",
+    "gentle, quiet synthwave lo-fi beats",
+    "morning lo-fi beats",
+]
+
+print("model obtained. generating audio...")
+
+a = time.time()
+wav = model.generate(descriptions)
+b = time.time()
+
+print(f"audio generated. took {b - a} seconds.")
+
+for idx, one_wav in enumerate(wav):
+    # Will save under {idx}.mp3, with loudness normalization at -14 dB LUFS.
+    audio_write(
+        f"{idx}",
+        one_wav.cpu(),
+        model.sample_rate,
+        format="mp3",
+        strategy="loudness",
+        loudness_compressor=True,
+    )
diff --git a/server.py b/server.py
new file mode 100644
index 0000000..2c3f332
--- /dev/null
+++ b/server.py
@@ -0,0 +1,31 @@
+import threading
+from generate import generate
+from fastapi import FastAPI
+from fastapi.responses import FileResponse
+from fastapi.staticfiles import StaticFiles
+
+app = FastAPI()
+current_index = 0
+
+app.mount("/", StaticFiles(directory="web", html=True), name="web")
+
+
+# Advance the current clip index (0-9) every 60 seconds, wrapping back to 0.
+def advance():
+    global current_index
+
+    # if current_index == 0:
+    #     generate(offset=5)
+    # elif current_index == 5:
+    #     generate(offset=0)
+
+    if current_index == 9:
+        current_index = 0
+    else:
+        current_index = current_index + 1
+
+    t = threading.Timer(60, advance)
+    t.start()
+
+
+advance()
diff --git a/web/index.html b/web/index.html
new file mode 100644
index 0000000..42b9237
--- /dev/null
+++ b/web/index.html
@@ -0,0 +1,12 @@
+<!DOCTYPE html>
+<html lang="en">
+  <head>
+    <title>infinifi</title>
+  </head>
+  <body>
+    <div>
+      test
+    </div>
+    <script src="script.js"></script>
+  </body>
+</html>
diff --git a/web/script.js b/web/script.js
new file mode 100644
index 0000000..e69de29