Mirror of https://github.com/KevinMidboe/spotify-downloader.git, synced 2025-10-29 18:00:15 +00:00
Compare commits
11 Commits
| SHA1 |
|---|
| b13f12f1fe |
| 19ae8fd408 |
| 89735c2bbb |
| debe7ee902 |
| 9c97f33aa2 |
| 046e7e9d3c |
| 29b1f31a26 |
| 64d54d7943 |
| 85c12a91ef |
| 9795d7e9b8 |
| bbe43da191 |
CHANGES.md (21 changed lines)
@@ -8,9 +8,28 @@ The release dates mentioned follow the format `DD-MM-YYYY`.
 ## [Unreleased]
 
+## [2.0.4] - 19-05-2020
+### Fixed
+- Do not remove the currently downloading track from file on `KeyboardInterrupt`
+  when `--list` is passed. ([@ritiek](https://github.com/ritiek/spotify-downloader)) (#722)
+- Failure on invoking spotdl if FFmpeg isn't found. It should now warn about missing
+  FFmpeg and move ahead without encoding. ([@ritiek](https://github.com/ritiek))
+  (debe7ee9024e2ec65eed9935460c62f4eecd03ea)
+
+## [2.0.3] (Hotfix Release) - 18-05-2020
+### Fixed
+- Genius would sometimes return invalid lyrics. Retry a few times in such a case.
+  ([@ritiek](https://github.com/ritiek)) (29b1f31a2622f749df83c3072c4cbb22615bff95)
+
+## [2.0.2] (Hotfix Release) - 18-05-2020
+### Fixed
+- Skipping tracks with `-m` would crash. ([@ritiek](https://github.com/ritiek))
+  (bbe43da191093302726ddc9a48f0fa0a55be6fb6)
+
 ## [2.0.1] (Hotfix Release) - 18-05-2020
 ### Fixed
-- `-o m4a` would always fail.
+- `-o m4a` would always fail. ([@ritiek](https://github.com/ritiek))
+  (cd5f224e379f3feefc95e338ec50674f976e2e89)
 
 ## [2.0.0] - 18-05-2020
 
 ### Migrating from v1.2.6 to v2.0.0
@@ -321,7 +321,7 @@ class Spotdl:
 
     def download_tracks_from_file(self, path):
         logger.info(
-            "Checking and removing any duplicate tracks in {}.".format(path)
+            'Checking and removing any duplicate tracks in "{}".'.format(path)
         )
         tracks = spotdl.util.readlines_from_nonbinary_file(path)
         tracks = self.strip_and_filter_duplicates(tracks)
@@ -346,12 +346,12 @@ class Spotdl:
                 yt_search_format=self.arguments["search_format"],
                 yt_manual=self.arguments["manual"]
             )
-            log_track_query = '{position}. Downloading "{track}"'.format(
-                position=position,
-                track=track
-            )
-            logger.info(log_track_query)
             try:
+                log_track_query = '{position}. Downloading "{track}"'.format(
+                    position=position,
+                    track=track
+                )
+                logger.info(log_track_query)
                 metadata = search_metadata.on_youtube_and_spotify()
                 self.download_track_from_metadata(metadata)
             except (urllib.request.URLError, TypeError, IOError) as e:
@@ -363,6 +363,11 @@ class Spotdl:
                 tracks.append(track)
             except (NoYouTubeVideoFoundError, NoYouTubeVideoMatchError) as e:
                 logger.error("{err}".format(err=e.args[0]))
+            except KeyboardInterrupt:
+                # The current track hasn't been downloaded completely.
+                # Make sure we continue from here the next time the program runs.
+                tracks.insert(0, track)
+                raise
             else:
                 if self.arguments["write_successful_file"]:
                     with open(self.arguments["write_successful_file"], "a") as fout:
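The hunks above implement the 2.0.4 `--list` fix: when the user hits Ctrl+C, the half-downloaded track is pushed back to the front of the in-memory list before the `KeyboardInterrupt` propagates, so the tracks file written on exit still contains it. A minimal, self-contained sketch of that resume-on-interrupt pattern (the `download_track` stub and `queue_file` handling are illustrative placeholders, not spotdl's code):

```python
import time


def download_track(track):
    # Stand-in for the real download; sleeps so Ctrl+C can interrupt it.
    time.sleep(1)


def download_all(tracks, queue_file):
    while tracks:
        track = tracks.pop(0)
        try:
            download_track(track)
        except KeyboardInterrupt:
            # Put the interrupted track back so the next run resumes with it
            # instead of silently dropping it.
            tracks.insert(0, track)
            raise
        finally:
            # Persist whatever is still pending, including a re-inserted track.
            with open(queue_file, "w") as fout:
                fout.write("\n".join(tracks))


if __name__ == "__main__":
    download_all(["first track", "second track"], "queue.txt")
```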
@@ -37,12 +37,12 @@ class EncoderBase(ABC):
    """
 
    @abstractmethod
-   def __init__(self, encoder_path, loglevel, additional_arguments=[]):
+   def __init__(self, encoder_path, must_exist, loglevel, additional_arguments=[]):
        """
        This method must make sure whether specified encoder
        is available under PATH.
        """
-       if shutil.which(encoder_path) is None:
+       if must_exist and shutil.which(encoder_path) is None:
            raise EncoderNotFoundError(
                "{} executable does not exist or was not found in PATH.".format(
                    encoder_path
@@ -27,11 +27,11 @@ RULES = {
 
 
 class EncoderFFmpeg(EncoderBase):
-    def __init__(self, encoder_path="ffmpeg"):
+    def __init__(self, encoder_path="ffmpeg", must_exist=True):
         _loglevel = "-hide_banner -nostats -v panic"
         _additional_arguments = ["-b:a", "192k", "-vn"]
         try:
-            super().__init__(encoder_path, _loglevel, _additional_arguments)
+            super().__init__(encoder_path, must_exist, _loglevel, _additional_arguments)
         except EncoderNotFoundError as e:
             raise FFmpegNotFoundError(e.args[0])
         self._rules = RULES
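Taken together, the two hunks above let an encoder object be constructed even when the binary is missing, deferring the hard failure to callers that pass `must_exist=True`. A rough sketch of that pattern (the `Encoder` class and its `available` flag are illustrative, not spotdl's actual API):

```python
import shutil


class EncoderNotFoundError(Exception):
    pass


class Encoder:
    def __init__(self, encoder_path="ffmpeg", must_exist=True):
        # Fail hard only when the caller insists the binary must be present.
        if must_exist and shutil.which(encoder_path) is None:
            raise EncoderNotFoundError(
                "{} executable does not exist or was not found in PATH.".format(encoder_path)
            )
        self.encoder_path = encoder_path
        self.available = shutil.which(encoder_path) is not None


# A caller can now construct the encoder unconditionally and decide later
# whether to re-encode or just warn and skip encoding.
encoder = Encoder(must_exist=False)
if not encoder.available:
    print("Warning: FFmpeg not found. Downloading without encoding.")
```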
@@ -48,14 +48,12 @@ class Genius(LyricBase):
         else:
             return response.read()
 
-    def _get_lyrics_text(self, html):
+    def _get_lyrics_text(self, paragraph):
         """
         Extracts and returns the lyric content from the provided HTML.
         """
-        soup = BeautifulSoup(html, "html.parser")
-        lyrics_paragraph = soup.find("p")
-        if lyrics_paragraph:
-            return lyrics_paragraph.get_text()
+        if paragraph:
+            return paragraph.get_text()
         else:
             raise LyricsNotFoundError(
                 "The lyrics for this track are yet to be released on Genius."
@@ -81,6 +79,7 @@ class Genius(LyricBase):
         """
         encoded_query = urllib.request.quote(query.replace(" ", "+"))
         search_url = self.base_search_url + encoded_query
+        logger.debug('Fetching Genius search results from "{}".'.format(search_url))
         metadata = self._fetch_search_page(search_url)
 
         lyric_url = None
@@ -105,6 +104,7 @@ class Genius(LyricBase):
         Returns the lyric string for the track best matching the
         given query.
         """
+        logger.debug('Fetching lyrics for the search query on "{}".'.format(query))
         try:
             lyric_url = self.best_matching_lyric_url_from_query(query)
         except LyricsNotFoundError:
@@ -121,13 +121,31 @@ class Genius(LyricBase):
         result.
         """
         lyric_url = self.guess_lyric_url_from_artist_and_track(artist, track)
-        return self.from_url(lyric_url, linesep, timeout)
+        return self.from_url(lyric_url, linesep, timeout=timeout)
 
-    def from_url(self, url, linesep="\n", timeout=None):
+    def from_url(self, url, linesep="\n", retries=5, timeout=None):
         """
         Returns the lyric string for the given URL.
         """
         logger.debug('Fetching lyric text from "{}".'.format(url))
         lyric_html_page = self._fetch_url_page(url, timeout=timeout)
-        lyrics = self._get_lyrics_text(lyric_html_page)
+        soup = BeautifulSoup(lyric_html_page, "html.parser")
+        paragraph = soup.find("p")
+        # If <p> has a class (like <p class="bla">), then we got an invalid
+        # response. Retry in such a case.
+        invalid_response = paragraph.get("class") is not None
+        to_retry = retries > 0 and invalid_response
+        if to_retry:
+            logger.debug(
+                "Retrying since Genius returned invalid response for search "
+                "results. Retries left: {retries}.".format(retries=retries)
+            )
+            return self.from_url(url, linesep=linesep, retries=retries-1, timeout=timeout)
+
+        if invalid_response:
+            raise LyricsNotFoundError(
+                'Genius returned invalid response for the search URL "{}".'.format(url)
+            )
+        lyrics = self._get_lyrics_text(paragraph)
         return lyrics.replace("\n", linesep)
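The retry logic above recurses with `retries-1`, logs the remaining budget, and only gives up (raising `LyricsNotFoundError`) once the budget is exhausted, which is the 2.0.3 fix for Genius occasionally returning invalid lyrics. A small stand-alone sketch of the same retry-on-invalid-response idea (`fetch_page` and `looks_invalid` are made-up placeholders, not spotdl functions):

```python
import random


def fetch_page(url):
    # Pretend the server answers with a bogus page about half of the time.
    if random.random() < 0.5:
        return "<p class='error'>try again</p>"
    return "<p>lyrics</p>"


def looks_invalid(html):
    # Mirrors the heuristic above: a <p> tag carrying a class attribute
    # marks an invalid response.
    first_p_attrs = html.split("<p", 1)[1].split(">", 1)[0]
    return "class=" in first_p_attrs


def fetch_with_retries(url, retries=5):
    html = fetch_page(url)
    if looks_invalid(html):
        if retries > 0:
            print("Invalid response. Retries left: {}".format(retries))
            return fetch_with_retries(url, retries=retries - 1)
        raise RuntimeError("Still invalid after all retries: {}".format(url))
    return html


print(fetch_with_retries("https://example.com/lyrics"))
```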
@@ -53,7 +53,7 @@ class YouTubeSearch:
         quoted_query = urllib.request.quote(query)
         return self.base_search_url.format(quoted_query)
 
-    def _fetch_response_html(self, url, retries=5):
+    def _fetch_response_html(self, url):
         response = urllib.request.urlopen(url)
         soup = BeautifulSoup(response.read(), "html.parser")
         return soup
@@ -119,12 +119,11 @@ class YouTubeSearch:
         videos = self._fetch_search_results(html, limit=limit)
         to_retry = retries > 0 and self._is_server_side_invalid_response(videos, html)
         if to_retry:
-            retries -= 1
             logger.debug(
                 "Retrying since YouTube returned invalid response for search "
                 "results. Retries left: {retries}.".format(retries=retries)
             )
-            return self.search(query, limit=limit, retries=retries)
+            return self.search(query, limit=limit, retries=retries-1)
         return YouTubeVideos(videos)
@@ -163,7 +163,7 @@ class MetadataSearch:
         if video is None:
             raise NoYouTubeVideoMatchError(
                 'No matching videos found on YouTube for the search query "{}".'.format(
-                    search_query
+                    query
                 )
             )
         return video
@@ -44,7 +44,7 @@ class Track:
         return progress_bar
 
     def download_while_re_encoding(self, stream, target_path, target_encoding=None,
-                                   encoder=EncoderFFmpeg(), show_progress=True):
+                                   encoder=EncoderFFmpeg(must_exist=False), show_progress=True):
         total_chunks = self.calculate_total_chunks(stream["filesize"])
         process = encoder.re_encode_from_stdin(
             stream["encoding"],
@@ -80,7 +80,7 @@ class Track:
         writer(response, progress_bar, file_io)
 
     def re_encode(self, input_path, target_path, target_encoding=None,
-                  encoder=EncoderFFmpeg(), show_progress=True):
+                  encoder=EncoderFFmpeg(must_exist=False), show_progress=True):
         stream = self.metadata["streams"].getbest()
         total_chunks = self.calculate_total_chunks(stream["filesize"])
         process = encoder.re_encode_from_stdin(
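The default-argument change in these two hunks matters because Python evaluates default values once, when the `def` statement runs at import time; with `encoder=EncoderFFmpeg()`, a machine without FFmpeg would raise before spotdl could do anything, which is the failure the 2.0.4 changelog describes. A toy illustration of that evaluation-time behaviour (the `Encoder` class and `re_encode` function are illustrative stand-ins, not spotdl's code):

```python
import shutil


class Encoder:
    def __init__(self, path="ffmpeg", must_exist=True):
        if must_exist and shutil.which(path) is None:
            raise RuntimeError("{} not found in PATH".format(path))


# The default below is built when this `def` is executed, not when the
# function is called. With must_exist=True it would raise right here on a
# machine without FFmpeg; must_exist=False defers any failure to call time.
def re_encode(data, encoder=Encoder(must_exist=False)):
    return data  # placeholder for the real re-encoding work


print(re_encode(b"audio bytes"))
```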
@@ -1,2 +1,2 @@
-__version__ = "2.0.1"
+__version__ = "2.0.4"