Skip to content

Commit

Permalink
Fix: Fixed issue with fetching tracks
Browse files Browse the repository at this point in the history
  • Loading branch information
Peter-Immanuel committed Dec 26, 2023
1 parent 9981144 commit f9ce411
Show file tree
Hide file tree
Showing 3 changed files with 27 additions and 15 deletions.
11 changes: 8 additions & 3 deletions engine/root.py
Original file line number Diff line number Diff line change
Expand Up @@ -75,7 +75,12 @@ def parse_single_object(self, object=None,**kwargs):
  #Each engine should define its own single-object fetch method"""
raise NotImplementedError()

def get_formated_url(self,url=None,path=None,page=None, category=None, query=None, method=None,params=None, **kwargs):
def get_formated_url(
self,url=None,path=None,
page=None, category=None,
query=None, method=None,
params=None, **kwargs
):
"""
Return a formatted Music Engine search or fetch url
"""
Expand All @@ -102,7 +107,7 @@ def get_formated_url(self,url=None,path=None,page=None, category=None, query=Non
else url._replace(path=url_path)
)

print("\nFORMATED URL: "+self.formated_url.geturl())
# print("\nFORMATED URL: "+self.formated_url.geturl())
return self.formated_url.geturl()


Expand All @@ -118,7 +123,7 @@ def get_response_object(self,url,method=None,payload=None,header=None,**kwargs):
:header: dict -> The request header
:return: Html source code or Json of a given URL.
"""
print("\n\n\nRESPONSE FROM URL : "+url)
# print("\n\n\nRESPONSE FROM URL : "+url)

# Get header and method either passed into the get_response_object or globally set
header= helpers.get_header() if header is None else header
Expand Down
30 changes: 18 additions & 12 deletions engine/songslover.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,6 @@
import re
from engine.root import BaseEngine
from tqdm import tqdm

class SongsLover(BaseEngine):

Expand Down Expand Up @@ -53,7 +54,7 @@ def parse_parent_object(self, soup,**kwargs):
self.get_response_object(elem["href"],**kwargs),
category=elem['href'].split('/')[3],
**kwargs)
for elem in soup.select("article h2 a")
for elem in tqdm(soup.select("article h2 a"))
)

def parse_single_object(self,soup, category=None, **kwargs):
Expand All @@ -71,15 +72,9 @@ def parse_single_object(self,soup, category=None, **kwargs):
        # This is parsed to accommodate some of the changes

        # Some divs contain both the title and the artist, while some do not
try:
title, artist = soup.select(
'div[class="post-inner"] h1 span[itemprop="name"]'
)[0].text.split(" –")
artist, title = artist.strip(), title.strip()
except Exception:
artist = title = soup.select(
'div[class="post-inner"] h1 span[itemprop="name"]'
)[0].text
description = soup.select(
'div[class="post-inner"] h1 span[itemprop="name"]'
)[0].text.strip()

#Some Soups do not have art links
try:
Expand All @@ -100,7 +95,11 @@ def parse_single_object(self,soup, category=None, **kwargs):
valid_group = list(i for i in regex_group if i != None)
download_link = valid_group[0].find_previous("a")["href"] if len(valid_group) >= 1 else None
if download_link!=None:
return dict(type='track',category=category,artist=artist,title=title,download=download_link,art=art_link,details=(title,download_link))
return dict(
type='track',category=category,
description=description,download=download_link,
art=art_link,
details=(description,download_link))

#For category other than tracks
try:
Expand All @@ -109,6 +108,7 @@ def parse_single_object(self,soup, category=None, **kwargs):
).find_previous("a")["href"]
except Exception:
download_link = None

# Get soup element to extract Song title and Song links for albums
response_group = [
soup.select("li strong a"),
Expand Down Expand Up @@ -149,7 +149,13 @@ def parse_single_object(self,soup, category=None, **kwargs):
tracks_details.append((song_title,song_link))
except Exception:
pass
return dict(type='album',category=category,artist=artist,title=title,download=download_link,art=art_link,details=tracks_details)
return dict(
type='album',category=category,
description=description,
download=download_link,
art=art_link,
details=tracks_details
)


def get_query_params(self, query=None,**kwargs):
Expand Down
1 change: 1 addition & 0 deletions requirements.txt
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,7 @@ six==1.16.0
soupsieve==2.3.1
toml==0.10.2
tomli==2.0.1
tqdm==4.66.1
typing_extensions==4.1.1
urllib3==1.26.9
validators==0.18.2
Expand Down

0 comments on commit f9ce411

Please sign in to comment.