Compare commits

26 Commits

Author SHA1 Message Date
Petitminion 7c3206bf83 fix build_docs pipeline 2025-04-14 18:34:55 +02:00
petitminion 6fac158374 Update .gitlab-ci.yml file, disable linux/arm/v7 for 1.4.1 2025-04-14 15:29:35 +00:00
Petitminion 06494a538c Version bump and changelog for 1.4.1 2025-04-14 16:19:17 +02:00
petitminion a9d56911ef pin setuptools to 60.10.0 to fix builds 1.4.0 2025-04-14 14:16:08 +00:00
Renovate Bot 7f7f4a1fff chore(api): update dependency django to v3.2.25 2024-05-15 12:34:16 +00:00
Georg Krause 6b8bbc5f80 fix(api): Make trailing slashes for each endpoint optional
Part-of: <https://dev.funkwhale.audio/funkwhale/funkwhale/-/merge_requests/2766>
2024-02-26 13:21:14 +00:00
Renovate Bot 673db74dd2 chore(front): update dependency standardized-audio-context to v25.3.64
Part-of: <https://dev.funkwhale.audio/funkwhale/funkwhale/-/merge_requests/2765>
2024-02-26 11:06:11 +00:00
Renovate Bot 9b0f538c08 chore(docs): update dependency django to v3.2.24
Part-of: <https://dev.funkwhale.audio/funkwhale/funkwhale/-/merge_requests/2764>
2024-02-26 10:35:55 +00:00
Renovate Bot c1608b3459 chore(api): update dependency pyld to v2.0.4
Part-of: <https://dev.funkwhale.audio/funkwhale/funkwhale/-/merge_requests/2761>
2024-02-21 11:06:45 +00:00
Renovate Bot 809b972772 chore(api): update dependency django to v3.2.24
Part-of: <https://dev.funkwhale.audio/funkwhale/funkwhale/-/merge_requests/2760>
2024-02-21 10:33:53 +00:00
Renovate Bot 288bdd163d chore(front): lock file maintenance
Part-of: <https://dev.funkwhale.audio/funkwhale/funkwhale/-/merge_requests/2751>
2024-02-05 14:34:02 +00:00
Renovate Bot 59de22a2fd chore(front): lock file maintenance
Part-of: <https://dev.funkwhale.audio/funkwhale/funkwhale/-/merge_requests/2747>
2024-02-05 00:06:26 +00:00
Renovate Bot 2ec71111a7 chore(front): update dependency standardized-audio-context to v25.3.63
Part-of: <https://dev.funkwhale.audio/funkwhale/funkwhale/-/merge_requests/2718>
2024-01-31 21:03:39 +00:00
Renovate Bot cb307cf296 chore(api): update dependency unidecode to v1.3.8
Part-of: <https://dev.funkwhale.audio/funkwhale/funkwhale/-/merge_requests/2717>
2024-01-31 20:05:25 +00:00
Renovate Bot 06f0cf90f0 chore(api): update dependency pytest to v7.4.4
Part-of: <https://dev.funkwhale.audio/funkwhale/funkwhale/-/merge_requests/2716>
2024-01-31 18:34:17 +00:00
Renovate Bot de5063afef chore(api): update dependency prompt-toolkit to v3.0.43
Part-of: <https://dev.funkwhale.audio/funkwhale/funkwhale/-/merge_requests/2715>
2024-01-31 17:07:21 +00:00
Renovate Bot f43278a709 chore(api): update dependency feedparser to v6.0.11
Part-of: <https://dev.funkwhale.audio/funkwhale/funkwhale/-/merge_requests/2714>
2024-01-31 16:06:02 +00:00
Renovate Bot 15b261d7fd chore: pin dependency poetry-core to ==1.8.1
Part-of: <https://dev.funkwhale.audio/funkwhale/funkwhale/-/merge_requests/2713>
2024-01-31 15:04:08 +00:00
Renovate Bot ec913cfa64 chore(docs): pin dependency poetry-core to ==1.8.1
Part-of: <https://dev.funkwhale.audio/funkwhale/funkwhale/-/merge_requests/2712>
2024-01-31 14:33:45 +00:00
Renovate Bot 5ef3366974 chore(api): pin dependency poetry-core to ==1.8.1
Part-of: <https://dev.funkwhale.audio/funkwhale/funkwhale/-/merge_requests/2711>
2024-01-11 10:05:00 +00:00
Baudouin Feildel c5f582d0e3 Add changelog entry.
Part-of: <https://dev.funkwhale.audio/funkwhale/funkwhale/-/merge_requests/2699>
2024-01-11 11:00:02 +01:00
Baudouin Feildel d0a997643b Fix Apache configuration
Built assets are fetched using paths like `/assets/foo-a1b2c3.js`. Apache failed to serve them because the proxy pass was not disabled for the static assets folder. This commit adds the configuration needed to serve those assets correctly (the added directives are quoted after the commit list below).

Part-of: <https://dev.funkwhale.audio/funkwhale/funkwhale/-/merge_requests/2699>
2024-01-11 10:59:56 +01:00
Georg Krause c21bfce9d0 fix(docs): Fix build script for documentation to properly deploy swagger 2023-12-13 15:55:54 +01:00
Georg Krause 98d4f4ef54 Merge branch 'develop' into stable 2023-12-12 13:29:29 +01:00
Renovate Bot e31f0043b0 chore(front): update dependency standardized-audio-context to v25.3.59
Part-of: <https://dev.funkwhale.audio/funkwhale/funkwhale/-/merge_requests/2645>
2023-11-23 12:34:23 +00:00
Renovate Bot c87a22fb15 chore(api): update dependency aioresponses to v0.7.6
Part-of: <https://dev.funkwhale.audio/funkwhale/funkwhale/-/merge_requests/2644>
2023-11-23 11:07:38 +00:00
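
For reference, the Apache fix described in commit d0a997643b above adds the following directives (quoted from the configuration diff further down this page); they stop the reverse proxy from handling `/assets` requests so the built frontend assets are served directly from disk instead:

```apache
<Location "/assets">
    ProxyPass "!"
</Location>

Alias /assets ${FUNKWHALE_ROOT_PATH}/front/dist/assets
```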
24 changed files with 2892 additions and 2719 deletions

View File

@ -248,6 +248,9 @@ test_api:
CACHE_URL: "redis://redis:6379/0"
before_script:
- cd api
- poetry env info
- poetry run pip install "setuptools==60.10.0" wheel
- poetry run pip install --no-use-pep517 django-allauth==0.42.0
- poetry install --all-extras
script:
- >
@ -351,6 +354,8 @@ build_api_schema:
API_TYPE: "v1"
before_script:
- cd api
- poetry run pip install "setuptools==60.10.0" wheel
- poetry run pip install --no-use-pep517 django-allauth==0.42.0
- poetry install --all-extras
- poetry run funkwhale-manage migrate
script:
@ -461,14 +466,14 @@ docker:
- if: $CI_COMMIT_TAG
variables:
BUILD_ARGS: >
--set *.platform=linux/amd64,linux/arm64,linux/arm/v7
--set *.platform=linux/amd64,linux/arm64
--no-cache
--push
- if: $CI_COMMIT_BRANCH =~ /(stable|develop)/
variables:
BUILD_ARGS: >
--set *.platform=linux/amd64,linux/arm64,linux/arm/v7
--set *.platform=linux/amd64,linux/arm64
--set *.cache-from=type=registry,ref=$DOCKER_CACHE_IMAGE:$CI_COMMIT_BRANCH,oci-mediatypes=false
--set *.cache-to=type=registry,ref=$DOCKER_CACHE_IMAGE:$CI_COMMIT_BRANCH,mode=max,oci-mediatypes=false
--push

View File

@ -9,6 +9,20 @@ This changelog is viewable on the web at https://docs.funkwhale.audio/changelog.
<!-- towncrier -->
## 1.4.1 (2025-04-14)
Upgrade instructions are available at https://docs.funkwhale.audio/administrator/upgrade/index.html
Bugfixes:
- Fix 1.4.0 builds
- Fix build script for documentation to properly deploy swagger
- Make trailing slashes optional for all endpoints
Documentation:
- Fixed the sample Apache configuration
## 1.4.0 (2023-12-12)
Upgrade instructions are available at https://docs.funkwhale.audio/administrator/upgrade/index.html

View File

@ -34,10 +34,10 @@ if settings.DEBUG:
# This allows the error pages to be debugged during development, just visit
# these url in browser to see how these error pages look like.
urlpatterns += [
url(r"^400/$", default_views.bad_request),
url(r"^403/$", default_views.permission_denied),
url(r"^404/$", default_views.page_not_found),
url(r"^500/$", default_views.server_error),
url(r"^400/?$", default_views.bad_request),
url(r"^403/?$", default_views.permission_denied),
url(r"^404/?$", default_views.page_not_found),
url(r"^500/?$", default_views.server_error),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if "debug_toolbar" in settings.INSTALLED_APPS:

View File

@ -28,7 +28,7 @@ router.register(r"attachments", common_views.AttachmentViewSet, "attachments")
v1_patterns = router.urls
v1_patterns += [
url(r"^oembed/$", views.OembedView.as_view(), name="oembed"),
url(r"^oembed/?$", views.OembedView.as_view(), name="oembed"),
url(
r"^instance/",
include(("funkwhale_api.instance.urls", "instance"), namespace="instance"),

View File

@ -8,22 +8,22 @@ router = routers.OptionalSlashRouter()
router.register(r"search", views.SearchViewSet, "search")
urlpatterns = [
url(
"releases/(?P<uuid>[0-9a-z-]+)/$",
"releases/(?P<uuid>[0-9a-z-]+)/?$",
views.ReleaseDetail.as_view(),
name="release-detail",
),
url(
"artists/(?P<uuid>[0-9a-z-]+)/$",
"artists/(?P<uuid>[0-9a-z-]+)/?$",
views.ArtistDetail.as_view(),
name="artist-detail",
),
url(
"release-groups/browse/(?P<artist_uuid>[0-9a-z-]+)/$",
"release-groups/browse/(?P<artist_uuid>[0-9a-z-]+)/?$",
views.ReleaseGroupBrowse.as_view(),
name="release-group-browse",
),
url(
"releases/browse/(?P<release_group_uuid>[0-9a-z-]+)/$",
"releases/browse/(?P<release_group_uuid>[0-9a-z-]+)/?$",
views.ReleaseBrowse.as_view(),
name="release-browse",
),
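
The `routers.OptionalSlashRouter()` registered at the top of this file is not part of the diff. As a sketch only, assuming it follows the usual Django REST Framework pattern rather than showing Funkwhale's exact code, such a router simply relaxes the trailing-slash suffix of every generated route:

```python
from rest_framework import routers


class OptionalSlashRouter(routers.DefaultRouter):
    """Router whose generated routes accept URLs with or without a trailing slash."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # DefaultRouter normally appends a mandatory "/"; "/?" makes it
        # optional, mirroring the "/?$" regexes used elsewhere in this diff.
        self.trailing_slash = "/?"
```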

View File

@ -1,148 +1,148 @@
import logging
import time
# import logging
# import time
import troi
import troi.core
from django.core.cache import cache
from django.core.exceptions import ValidationError
from django.db.models import Q
from requests.exceptions import ConnectTimeout
# import troi
# import troi.core
# from django.core.cache import cache
# from django.core.exceptions import ValidationError
# from django.db.models import Q
# from requests.exceptions import ConnectTimeout
from funkwhale_api.music import models as music_models
from funkwhale_api.typesense import utils
# from funkwhale_api.music import models as music_models
# from funkwhale_api.typesense import utils
logger = logging.getLogger(__name__)
# logger = logging.getLogger(__name__)
patches = troi.utils.discover_patches()
# patches = troi.utils.discover_patches()
SUPPORTED_PATCHES = patches.keys()
# SUPPORTED_PATCHES = patches.keys()
def run(config, **kwargs):
"""Validate the received config and run the queryset generation"""
candidates = kwargs.pop("candidates", music_models.Track.objects.all())
validate(config)
return TroiPatch().get_queryset(config, candidates)
# def run(config, **kwargs):
# """Validate the received config and run the queryset generation"""
# candidates = kwargs.pop("candidates", music_models.Track.objects.all())
# validate(config)
# return TroiPatch().get_queryset(config, candidates)
def validate(config):
patch = config.get("patch")
if patch not in SUPPORTED_PATCHES:
raise ValidationError(
'Invalid patch "{}". Supported patches: {}'.format(
config["patch"], SUPPORTED_PATCHES
)
)
# def validate(config):
# patch = config.get("patch")
# if patch not in SUPPORTED_PATCHES:
# raise ValidationError(
# 'Invalid patch "{}". Supported patches: {}'.format(
# config["patch"], SUPPORTED_PATCHES
# )
# )
return True
# return True
def build_radio_queryset(patch, config, radio_qs):
"""Take a troi patch and its arg, match the missing mbid and then build a radio queryset"""
# def build_radio_queryset(patch, config, radio_qs):
# """Take a troi patch and its arg, match the missing mbid and then build a radio queryset"""
logger.info("Config used for troi radio generation is " + str(config))
# logger.info("Config used for troi radio generation is " + str(config))
start_time = time.time()
try:
recommendations = troi.core.generate_playlist(patch, config)
except ConnectTimeout:
raise ValueError(
"Timed out while connecting to ListenBrainz. No candidates could be retrieved for the radio."
)
end_time_rec = time.time()
logger.info("Troi fetch took :" + str(end_time_rec - start_time))
# start_time = time.time()
# try:
# recommendations = troi.core.generate_playlist(patch, config)
# except ConnectTimeout:
# raise ValueError(
# "Timed out while connecting to ListenBrainz. No candidates could be retrieved for the radio."
# )
# end_time_rec = time.time()
# logger.info("Troi fetch took :" + str(end_time_rec - start_time))
if not recommendations:
raise ValueError("No candidates found by troi")
# if not recommendations:
# raise ValueError("No candidates found by troi")
recommended_mbids = [
recommended_recording.mbid
for recommended_recording in recommendations.playlists[0].recordings
]
# recommended_mbids = [
# recommended_recording.mbid
# for recommended_recording in recommendations.playlists[0].recordings
# ]
logger.info("Searching for MusicBrainz ID in Funkwhale database")
# logger.info("Searching for MusicBrainz ID in Funkwhale database")
qs_recommended = (
music_models.Track.objects.all()
.filter(mbid__in=recommended_mbids)
.order_by("mbid", "pk")
.distinct("mbid")
)
qs_recommended_mbid = [str(i.mbid) for i in qs_recommended]
# qs_recommended = (
# music_models.Track.objects.all()
# .filter(mbid__in=recommended_mbids)
# .order_by("mbid", "pk")
# .distinct("mbid")
# )
# qs_recommended_mbid = [str(i.mbid) for i in qs_recommended]
recommended_mbids_not_qs = [
mbid for mbid in recommended_mbids if mbid not in qs_recommended_mbid
]
cached_match = cache.get_many(recommended_mbids_not_qs)
cached_match_mbid = [str(i) for i in cached_match.keys()]
# recommended_mbids_not_qs = [
# mbid for mbid in recommended_mbids if mbid not in qs_recommended_mbid
# ]
# cached_match = cache.get_many(recommended_mbids_not_qs)
# cached_match_mbid = [str(i) for i in cached_match.keys()]
if qs_recommended and cached_match_mbid:
logger.info("MusicBrainz IDs found in Funkwhale database and redis")
qs_recommended_mbid.extend(cached_match_mbid)
mbids_found = qs_recommended_mbid
elif qs_recommended and not cached_match_mbid:
logger.info("MusicBrainz IDs found in Funkwhale database")
mbids_found = qs_recommended_mbid
elif not qs_recommended and cached_match_mbid:
logger.info("MusicBrainz IDs found in redis cache")
mbids_found = cached_match_mbid
else:
logger.info(
"Couldn't find any matches in Funkwhale database. Trying to match all"
)
mbids_found = []
# if qs_recommended and cached_match_mbid:
# logger.info("MusicBrainz IDs found in Funkwhale database and redis")
# qs_recommended_mbid.extend(cached_match_mbid)
# mbids_found = qs_recommended_mbid
# elif qs_recommended and not cached_match_mbid:
# logger.info("MusicBrainz IDs found in Funkwhale database")
# mbids_found = qs_recommended_mbid
# elif not qs_recommended and cached_match_mbid:
# logger.info("MusicBrainz IDs found in redis cache")
# mbids_found = cached_match_mbid
# else:
# logger.info(
# "Couldn't find any matches in Funkwhale database. Trying to match all"
# )
# mbids_found = []
recommended_recordings_not_found = [
i for i in recommendations.playlists[0].recordings if i.mbid not in mbids_found
]
# recommended_recordings_not_found = [
# i for i in recommendations.playlists[0].recordings if i.mbid not in mbids_found
# ]
logger.info("Matching missing MusicBrainz ID to Funkwhale track")
# logger.info("Matching missing MusicBrainz ID to Funkwhale track")
start_time_resolv = time.time()
utils.resolve_recordings_to_fw_track(recommended_recordings_not_found)
end_time_resolv = time.time()
# start_time_resolv = time.time()
# utils.resolve_recordings_to_fw_track(recommended_recordings_not_found)
# end_time_resolv = time.time()
logger.info(
"Resolving "
+ str(len(recommended_recordings_not_found))
+ " tracks in "
+ str(end_time_resolv - start_time_resolv)
)
# logger.info(
# "Resolving "
# + str(len(recommended_recordings_not_found))
# + " tracks in "
# + str(end_time_resolv - start_time_resolv)
# )
cached_match = cache.get_many(recommended_mbids)
# cached_match = cache.get_many(recommended_mbids)
if not mbids_found and not cached_match:
raise ValueError("No candidates found for troi radio")
# if not mbids_found and not cached_match:
# raise ValueError("No candidates found for troi radio")
mbids_found_pks = list(
music_models.Track.objects.all()
.filter(mbid__in=mbids_found)
.order_by("mbid", "pk")
.distinct("mbid")
.values_list("pk", flat=True)
)
# mbids_found_pks = list(
# music_models.Track.objects.all()
# .filter(mbid__in=mbids_found)
# .order_by("mbid", "pk")
# .distinct("mbid")
# .values_list("pk", flat=True)
# )
mbids_found_pks_unique = [
i for i in mbids_found_pks if i not in cached_match.keys()
]
# mbids_found_pks_unique = [
# i for i in mbids_found_pks if i not in cached_match.keys()
# ]
if mbids_found and cached_match:
return radio_qs.filter(
Q(pk__in=mbids_found_pks_unique) | Q(pk__in=cached_match.values())
)
if mbids_found and not cached_match:
return radio_qs.filter(pk__in=mbids_found_pks_unique)
# if mbids_found and cached_match:
# return radio_qs.filter(
# Q(pk__in=mbids_found_pks_unique) | Q(pk__in=cached_match.values())
# )
# if mbids_found and not cached_match:
# return radio_qs.filter(pk__in=mbids_found_pks_unique)
if not mbids_found and cached_match:
return radio_qs.filter(pk__in=cached_match.values())
# if not mbids_found and cached_match:
# return radio_qs.filter(pk__in=cached_match.values())
class TroiPatch:
code = "troi-patch"
label = "Troi Patch"
# class TroiPatch:
# code = "troi-patch"
# label = "Troi Patch"
def get_queryset(self, config, qs):
patch_string = config.pop("patch")
patch = patches[patch_string]
return build_radio_queryset(patch(), config, qs)
# def get_queryset(self, config, qs):
# patch_string = config.pop("patch")
# patch = patches[patch_string]
# return build_radio_queryset(patch(), config, qs)

View File

@ -1,111 +1,111 @@
from troi import Artist, Element, Playlist, Recording
from troi.patch import Patch
# from troi import Artist, Element, Playlist, Recording
# from troi.patch import Patch
recording_list = [
Recording(
name="I Want It That Way",
mbid="87dfa566-21c3-45ed-bc42-1d345b8563fa",
artist=Artist(name="artist_name"),
),
Recording(name="Untouchable", artist=Artist(name="Another lol")),
Recording(
name="The Perfect Kiss",
mbid="ec0da94e-fbfe-4eb0-968e-024d4c32d1d0",
artist=Artist(name="artist_name2"),
),
Recording(
name="Love Your Voice",
mbid="93726547-f8c0-4efd-8e16-d2dee76500f6",
artist=Artist(name="artist_name"),
),
Recording(
name="Hall of Fame",
mbid="395bd5a1-79cc-4e04-8869-ca9eabc78d09",
artist=Artist(name="artist_name_3"),
),
]
# recording_list = [
# Recording(
# name="I Want It That Way",
# mbid="87dfa566-21c3-45ed-bc42-1d345b8563fa",
# artist=Artist(name="artist_name"),
# ),
# Recording(name="Untouchable", artist=Artist(name="Another lol")),
# Recording(
# name="The Perfect Kiss",
# mbid="ec0da94e-fbfe-4eb0-968e-024d4c32d1d0",
# artist=Artist(name="artist_name2"),
# ),
# Recording(
# name="Love Your Voice",
# mbid="93726547-f8c0-4efd-8e16-d2dee76500f6",
# artist=Artist(name="artist_name"),
# ),
# Recording(
# name="Hall of Fame",
# mbid="395bd5a1-79cc-4e04-8869-ca9eabc78d09",
# artist=Artist(name="artist_name_3"),
# ),
# ]
class DummyElement(Element):
"""Dummy element that returns a fixed playlist for testing"""
# class DummyElement(Element):
# """Dummy element that returns a fixed playlist for testing"""
@staticmethod
def outputs():
return [Playlist]
# @staticmethod
# def outputs():
# return [Playlist]
def read(self, sources):
recordings = recording_list
# def read(self, sources):
# recordings = recording_list
return [
Playlist(
name="Test Export Playlist",
description="A playlist to test exporting playlists to spotify",
recordings=recordings,
)
]
# return [
# Playlist(
# name="Test Export Playlist",
# description="A playlist to test exporting playlists to spotify",
# recordings=recordings,
# )
# ]
class DummyPatch(Patch):
"""Dummy patch that always returns a fixed set of recordings for testing"""
# class DummyPatch(Patch):
# """Dummy patch that always returns a fixed set of recordings for testing"""
@staticmethod
def slug():
return "test-patch"
# @staticmethod
# def slug():
# return "test-patch"
def create(self, inputs):
return DummyElement()
# def create(self, inputs):
# return DummyElement()
@staticmethod
def outputs():
return [Recording]
# @staticmethod
# def outputs():
# return [Recording]
recommended_recording_mbids = [
"87dfa566-21c3-45ed-bc42-1d345b8563fa",
"ec0da94e-fbfe-4eb0-968e-024d4c32d1d0",
"93726547-f8c0-4efd-8e16-d2dee76500f6",
"395bd5a1-79cc-4e04-8869-ca9eabc78d09",
]
# recommended_recording_mbids = [
# "87dfa566-21c3-45ed-bc42-1d345b8563fa",
# "ec0da94e-fbfe-4eb0-968e-024d4c32d1d0",
# "93726547-f8c0-4efd-8e16-d2dee76500f6",
# "395bd5a1-79cc-4e04-8869-ca9eabc78d09",
# ]
typesense_search_result = {
"facet_counts": [],
"found": 1,
"out_of": 1,
"page": 1,
"request_params": {
"collection_name": "canonical_fw_data",
"per_page": 10,
"q": "artist_nameiwantitthatway",
},
"search_time_ms": 1,
"hits": [
{
"highlights": [
{
"field": "combined",
"snippet": "string",
"matched_tokens": ["string"],
}
],
"document": {
"pk": "1",
"combined": "artist_nameiwantitthatway",
},
"text_match": 130916,
},
{
"highlights": [
{
"field": "combined",
"snippet": "string",
"matched_tokens": ["string"],
}
],
"document": {
"pk": "2",
"combined": "artist_nameiwantitthatway",
},
"text_match": 130916,
},
],
}
# typesense_search_result = {
# "facet_counts": [],
# "found": 1,
# "out_of": 1,
# "page": 1,
# "request_params": {
# "collection_name": "canonical_fw_data",
# "per_page": 10,
# "q": "artist_nameiwantitthatway",
# },
# "search_time_ms": 1,
# "hits": [
# {
# "highlights": [
# {
# "field": "combined",
# "snippet": "string",
# "matched_tokens": ["string"],
# }
# ],
# "document": {
# "pk": "1",
# "combined": "artist_nameiwantitthatway",
# },
# "text_match": 130916,
# },
# {
# "highlights": [
# {
# "field": "combined",
# "snippet": "string",
# "matched_tokens": ["string"],
# }
# ],
# "document": {
# "pk": "2",
# "combined": "artist_nameiwantitthatway",
# },
# "text_match": 130916,
# },
# ],
# }

View File

@ -1,92 +1,92 @@
import logging
import re
# import logging
# import re
import unidecode
from django.conf import settings
from django.core.cache import cache
from lb_matching_tools.cleaner import MetadataCleaner
# import unidecode
# from django.conf import settings
# from django.core.cache import cache
# from lb_matching_tools.cleaner import MetadataCleaner
from funkwhale_api.music import models as music_models
# from funkwhale_api.music import models as music_models
logger = logging.getLogger(__name__)
# logger = logging.getLogger(__name__)
api_key = settings.TYPESENSE_API_KEY
host = settings.TYPESENSE_HOST
port = settings.TYPESENSE_PORT
protocol = settings.TYPESENSE_PROTOCOL
TYPESENSE_NUM_TYPO = settings.TYPESENSE_NUM_TYPO
# api_key = settings.TYPESENSE_API_KEY
# host = settings.TYPESENSE_HOST
# port = settings.TYPESENSE_PORT
# protocol = settings.TYPESENSE_PROTOCOL
# TYPESENSE_NUM_TYPO = settings.TYPESENSE_NUM_TYPO
class TypesenseNotActivate(Exception):
pass
# class TypesenseNotActivate(Exception):
# pass
if not settings.TYPESENSE_API_KEY:
logger.info(
"Typesense is not activated. You can enable it by setting the TYPESENSE_API_KEY env variable."
)
else:
import typesense
# if not settings.TYPESENSE_API_KEY:
# logger.info(
# "Typesense is not activated. You can enable it by setting the TYPESENSE_API_KEY env variable."
# )
# else:
# import typesense
def delete_non_alnum_characters(text):
return unidecode.unidecode(re.sub(r"[^\w]+", "", text).lower())
# def delete_non_alnum_characters(text):
# return unidecode.unidecode(re.sub(r"[^\w]+", "", text).lower())
def resolve_recordings_to_fw_track(recordings):
"""
Tries to match a troi recording entity to a fw track using the typesense index.
It will save the results in the match_mbid attribute of the Track table.
For test purposes : if multiple fw tracks are returned, we log the information
but only keep the best result in db to avoid duplicates.
"""
# def resolve_recordings_to_fw_track(recordings):
# """
# Tries to match a troi recording entity to a fw track using the typesense index.
# It will save the results in the match_mbid attribute of the Track table.
# For test purposes : if multiple fw tracks are returned, we log the information
# but only keep the best result in db to avoid duplicates.
# """
if not settings.TYPESENSE_API_KEY:
raise TypesenseNotActivate(
"Typesense is not activated. You can enable it by setting the TYPESENSE_API_KEY env variable."
)
# if not settings.TYPESENSE_API_KEY:
# raise TypesenseNotActivate(
# "Typesense is not activated. You can enable it by setting the TYPESENSE_API_KEY env variable."
# )
client = typesense.Client(
{
"api_key": api_key,
"nodes": [{"host": host, "port": port, "protocol": protocol}],
"connection_timeout_seconds": 2,
}
)
# client = typesense.Client(
# {
# "api_key": api_key,
# "nodes": [{"host": host, "port": port, "protocol": protocol}],
# "connection_timeout_seconds": 2,
# }
# )
mc = MetadataCleaner()
# mc = MetadataCleaner()
for recording in recordings:
rec = mc.clean_recording(recording.name)
artist = mc.clean_artist(recording.artist.name)
canonical_name_for_track = delete_non_alnum_characters(artist + rec)
# for recording in recordings:
# rec = mc.clean_recording(recording.name)
# artist = mc.clean_artist(recording.artist.name)
# canonical_name_for_track = delete_non_alnum_characters(artist + rec)
logger.debug(f"Trying to resolve : {canonical_name_for_track}")
# logger.debug(f"Trying to resolve : {canonical_name_for_track}")
search_parameters = {
"q": canonical_name_for_track,
"query_by": "combined",
"num_typos": TYPESENSE_NUM_TYPO,
"drop_tokens_threshold": 0,
}
matches = client.collections["canonical_fw_data"].documents.search(
search_parameters
)
# search_parameters = {
# "q": canonical_name_for_track,
# "query_by": "combined",
# "num_typos": TYPESENSE_NUM_TYPO,
# "drop_tokens_threshold": 0,
# }
# matches = client.collections["canonical_fw_data"].documents.search(
# search_parameters
# )
if matches["hits"]:
hit = matches["hits"][0]
pk = hit["document"]["pk"]
logger.debug(f"Saving match for track with primary key {pk}")
cache.set(recording.mbid, pk)
# if matches["hits"]:
# hit = matches["hits"][0]
# pk = hit["document"]["pk"]
# logger.debug(f"Saving match for track with primary key {pk}")
# cache.set(recording.mbid, pk)
if settings.DEBUG and matches["hits"][1]:
for hit in matches["hits"][1:]:
pk = hit["document"]["pk"]
fw_track = music_models.Track.objects.get(pk=pk)
logger.info(
f"Duplicate match found for {fw_track.artist.name} {fw_track.title} \
and primary key {pk}. Skipping because of better match."
)
else:
logger.debug("No match found in fw db")
return cache.get_many([rec.mbid for rec in recordings])
# if settings.DEBUG and matches["hits"][1]:
# for hit in matches["hits"][1:]:
# pk = hit["document"]["pk"]
# fw_track = music_models.Track.objects.get(pk=pk)
# logger.info(
# f"Duplicate match found for {fw_track.artist.name} {fw_track.title} \
# and primary key {pk}. Skipping because of better match."
# )
# else:
# logger.debug("No match found in fw db")
# return cache.get_many([rec.mbid for rec in recordings])

View File

@ -10,7 +10,7 @@ router.register(r"apps", views.ApplicationViewSet, "apps")
router.register(r"grants", views.GrantViewSet, "grants")
urlpatterns = router.urls + [
url("^authorize/$", csrf_exempt(views.AuthorizeView.as_view()), name="authorize"),
url("^token/$", views.TokenView.as_view(), name="token"),
url("^revoke/$", views.RevokeTokenView.as_view(), name="revoke"),
url("^authorize/?$", csrf_exempt(views.AuthorizeView.as_view()), name="authorize"),
url("^token/?$", views.TokenView.as_view(), name="token"),
url("^revoke/?$", views.RevokeTokenView.as_view(), name="revoke"),
]

View File

@ -7,26 +7,26 @@ from . import views
urlpatterns = [
# URLs that do not require a session or valid token
url(
r"^password/reset/$",
r"^password/reset/?$",
views.PasswordResetView.as_view(),
name="rest_password_reset",
),
url(
r"^password/reset/confirm/$",
r"^password/reset/confirm/?$",
views.PasswordResetConfirmView.as_view(),
name="rest_password_reset_confirm",
),
# URLs that require a user to be logged in with a valid session / token.
url(
r"^user/$", rest_auth_views.UserDetailsView.as_view(), name="rest_user_details"
r"^user/?$", rest_auth_views.UserDetailsView.as_view(), name="rest_user_details"
),
url(
r"^password/change/$",
r"^password/change/?$",
views.PasswordChangeView.as_view(),
name="rest_password_change",
),
# Registration URLs
url(r"^registration/$", views.RegisterView.as_view(), name="rest_register"),
url(r"^registration/?$", views.RegisterView.as_view(), name="rest_register"),
url(
r"^registration/verify-email/?$",
views.VerifyEmailView.as_view(),

api/poetry.lock (generated, 3577 lines changed)

File diff suppressed because it is too large

View File

@ -1,6 +1,6 @@
[tool.poetry]
name = "funkwhale-api"
version = "1.4.0"
version = "1.4.1"
description = "Funkwhale API"
authors = ["Funkwhale Collective"]
@ -26,10 +26,11 @@ funkwhale-manage = 'funkwhale_api.main:main'
[tool.poetry.dependencies]
python = "^3.8,<3.12"
setuptools = "==60.10.0"
# Django
dj-rest-auth = { extras = ["with_social"], version = "2.2.8" }
django = "==3.2.23"
django = "==3.2.24"
django-allauth = "==0.42.0"
django-cache-memoize = "0.1.10"
django-cacheops = "==6.1"
@ -73,19 +74,17 @@ bleach = "==5.0.1"
boto3 = "==1.26.161"
click = "==8.1.7"
cryptography = "==38.0.4"
feedparser = "==6.0.10"
feedparser = "==6.0.11"
musicbrainzngs = "==0.7.1"
mutagen = "==1.46.0"
pillow = "==9.3.0"
pydub = "==0.25.1"
pyld = "==2.0.3"
pyld = "==2.0.4"
python-magic = "==0.4.27"
requests = "==2.28.2"
requests-http-message-signatures = "==0.3.1"
sentry-sdk = "==1.19.1"
watchdog = "==2.2.1"
troi = { git = "https://github.com/metabrainz/troi-recommendation-playground.git", tag = "v-2023-10-30.0"}
lb-matching-tools = { git = "https://github.com/metabrainz/listenbrainz-matching-tools.git", branch = "main"}
unidecode = "==1.3.7"
pycountry = "22.3.5"
@ -97,7 +96,6 @@ ipython = "==7.34.0"
pluralizer = "==1.2.0"
service-identity = "==21.1.0"
unicode-slugify = "==0.1.5"
[tool.poetry.group.dev.dependencies]
aioresponses = "==0.7.6"
asynctest = "==0.13.0"
@ -110,9 +108,9 @@ factory-boy = "==3.2.1"
faker = "==15.3.4"
flake8 = "==3.9.2"
ipdb = "==0.13.13"
pytest = "==7.4.3"
pytest = "==7.4.4"
pytest-asyncio = "==0.21.0"
prompt-toolkit = "==3.0.41"
prompt-toolkit = "==3.0.43"
pytest-cov = "==4.0.0"
pytest-django = "==4.5.2"
pytest-env = "==0.8.2"
@ -128,7 +126,8 @@ django-extensions = "==3.2.3"
typesense = ["typesense"]
[build-system]
requires = ["poetry-core>=1.0.0"]
requires = ["poetry-core==1.8.1", "setuptools==60.10.0"
]
build-backend = "poetry.core.masonry.api"
[tool.pylint.master]

View File

@ -1,116 +1,116 @@
import pytest
import troi.core
from django.core.cache import cache
from django.db.models import Q
from requests.exceptions import ConnectTimeout
# import pytest
# import troi.core
# from django.core.cache import cache
# from django.db.models import Q
# from requests.exceptions import ConnectTimeout
from funkwhale_api.music.models import Track
from funkwhale_api.radios import lb_recommendations
from funkwhale_api.typesense import factories as custom_factories
from funkwhale_api.typesense import utils
# from funkwhale_api.music.models import Track
# from funkwhale_api.radios import lb_recommendations
# from funkwhale_api.typesense import factories as custom_factories
# from funkwhale_api.typesense import utils
def test_can_build_radio_queryset_with_fw_db(factories, mocker):
factories["music.Track"](
title="I Want It That Way", mbid="87dfa566-21c3-45ed-bc42-1d345b8563fa"
)
factories["music.Track"](
title="The Perfect Kiss", mbid="ec0da94e-fbfe-4eb0-968e-024d4c32d1d0"
)
factories["music.Track"]()
# def test_can_build_radio_queryset_with_fw_db(factories, mocker):
# factories["music.Track"](
# title="I Want It That Way", mbid="87dfa566-21c3-45ed-bc42-1d345b8563fa"
# )
# factories["music.Track"](
# title="The Perfect Kiss", mbid="ec0da94e-fbfe-4eb0-968e-024d4c32d1d0"
# )
# factories["music.Track"]()
qs = Track.objects.all()
# qs = Track.objects.all()
mocker.patch("funkwhale_api.typesense.utils.resolve_recordings_to_fw_track")
# mocker.patch("funkwhale_api.typesense.utils.resolve_recordings_to_fw_track")
radio_qs = lb_recommendations.build_radio_queryset(
custom_factories.DummyPatch(), {"min_recordings": 1}, qs
)
recommended_recording_mbids = [
"87dfa566-21c3-45ed-bc42-1d345b8563fa",
"ec0da94e-fbfe-4eb0-968e-024d4c32d1d0",
]
# radio_qs = lb_recommendations.build_radio_queryset(
# custom_factories.DummyPatch(), {"min_recordings": 1}, qs
# )
# recommended_recording_mbids = [
# "87dfa566-21c3-45ed-bc42-1d345b8563fa",
# "ec0da94e-fbfe-4eb0-968e-024d4c32d1d0",
# ]
assert list(
Track.objects.all().filter(Q(mbid__in=recommended_recording_mbids))
) == list(radio_qs)
# assert list(
# Track.objects.all().filter(Q(mbid__in=recommended_recording_mbids))
# ) == list(radio_qs)
def test_build_radio_queryset_without_fw_db(mocker):
resolve_recordings_to_fw_track = mocker.patch.object(
utils, "resolve_recordings_to_fw_track", return_value=None
)
# mocker.patch.object(cache, "get_many", return_value=None)
# def test_build_radio_queryset_without_fw_db(mocker):
# resolve_recordings_to_fw_track = mocker.patch.object(
# utils, "resolve_recordings_to_fw_track", return_value=None
# )
# # mocker.patch.object(cache, "get_many", return_value=None)
qs = Track.objects.all()
# qs = Track.objects.all()
with pytest.raises(ValueError):
lb_recommendations.build_radio_queryset(
custom_factories.DummyPatch(), {"min_recordings": 1}, qs
)
# with pytest.raises(ValueError):
# lb_recommendations.build_radio_queryset(
# custom_factories.DummyPatch(), {"min_recordings": 1}, qs
# )
assert resolve_recordings_to_fw_track.called_once_with(
custom_factories.recommended_recording_mbids
)
# assert resolve_recordings_to_fw_track.called_once_with(
# custom_factories.recommended_recording_mbids
# )
def test_build_radio_queryset_with_redis_and_fw_db(factories, mocker):
factories["music.Track"](
pk="1", title="I Want It That Way", mbid="87dfa566-21c3-45ed-bc42-1d345b8563fa"
)
mocker.patch.object(utils, "resolve_recordings_to_fw_track", return_value=None)
redis_cache = {}
redis_cache["ec0da94e-fbfe-4eb0-968e-024d4c32d1d0"] = 2
mocker.patch.object(cache, "get_many", return_value=redis_cache)
# def test_build_radio_queryset_with_redis_and_fw_db(factories, mocker):
# factories["music.Track"](
# pk="1", title="I Want It That Way", mbid="87dfa566-21c3-45ed-bc42-1d345b8563fa"
# )
# mocker.patch.object(utils, "resolve_recordings_to_fw_track", return_value=None)
# redis_cache = {}
# redis_cache["ec0da94e-fbfe-4eb0-968e-024d4c32d1d0"] = 2
# mocker.patch.object(cache, "get_many", return_value=redis_cache)
qs = Track.objects.all()
# qs = Track.objects.all()
assert list(
lb_recommendations.build_radio_queryset(
custom_factories.DummyPatch(), {"min_recordings": 1}, qs
)
) == list(Track.objects.all().filter(pk__in=[1, 2]))
# assert list(
# lb_recommendations.build_radio_queryset(
# custom_factories.DummyPatch(), {"min_recordings": 1}, qs
# )
# ) == list(Track.objects.all().filter(pk__in=[1, 2]))
def test_build_radio_queryset_with_redis_and_without_fw_db(factories, mocker):
factories["music.Track"](
pk="1", title="Super title", mbid="87dfaaaa-2aaa-45ed-bc42-1d34aaaaaaaa"
)
mocker.patch.object(utils, "resolve_recordings_to_fw_track", return_value=None)
redis_cache = {}
redis_cache["87dfa566-21c3-45ed-bc42-1d345b8563fa"] = 1
mocker.patch.object(cache, "get_many", return_value=redis_cache)
qs = Track.objects.all()
# def test_build_radio_queryset_with_redis_and_without_fw_db(factories, mocker):
# factories["music.Track"](
# pk="1", title="Super title", mbid="87dfaaaa-2aaa-45ed-bc42-1d34aaaaaaaa"
# )
# mocker.patch.object(utils, "resolve_recordings_to_fw_track", return_value=None)
# redis_cache = {}
# redis_cache["87dfa566-21c3-45ed-bc42-1d345b8563fa"] = 1
# mocker.patch.object(cache, "get_many", return_value=redis_cache)
# qs = Track.objects.all()
assert list(
lb_recommendations.build_radio_queryset(
custom_factories.DummyPatch(), {"min_recordings": 1}, qs
)
) == list(Track.objects.all().filter(pk=1))
# assert list(
# lb_recommendations.build_radio_queryset(
# custom_factories.DummyPatch(), {"min_recordings": 1}, qs
# )
# ) == list(Track.objects.all().filter(pk=1))
def test_build_radio_queryset_catch_troi_ConnectTimeout(mocker):
mocker.patch.object(
troi.core,
"generate_playlist",
side_effect=ConnectTimeout,
)
qs = Track.objects.all()
# def test_build_radio_queryset_catch_troi_ConnectTimeout(mocker):
# mocker.patch.object(
# troi.core,
# "generate_playlist",
# side_effect=ConnectTimeout,
# )
# qs = Track.objects.all()
with pytest.raises(ValueError):
lb_recommendations.build_radio_queryset(
custom_factories.DummyPatch(), {"min_recordings": 1}, qs
)
# with pytest.raises(ValueError):
# lb_recommendations.build_radio_queryset(
# custom_factories.DummyPatch(), {"min_recordings": 1}, qs
# )
def test_build_radio_queryset_catch_troi_no_candidates(mocker):
mocker.patch.object(
troi.core,
"generate_playlist",
)
qs = Track.objects.all()
# def test_build_radio_queryset_catch_troi_no_candidates(mocker):
# mocker.patch.object(
# troi.core,
# "generate_playlist",
# )
# qs = Track.objects.all()
with pytest.raises(ValueError):
lb_recommendations.build_radio_queryset(
custom_factories.DummyPatch(), {"min_recordings": 1}, qs
)
# with pytest.raises(ValueError):
# lb_recommendations.build_radio_queryset(
# custom_factories.DummyPatch(), {"min_recordings": 1}, qs
# )

View File

@ -1,43 +1,43 @@
import requests_mock
import typesense
from django.core.cache import cache
# import requests_mock
# import typesense
# from django.core.cache import cache
from funkwhale_api.typesense import factories as custom_factories
from funkwhale_api.typesense import utils
# from funkwhale_api.typesense import factories as custom_factories
# from funkwhale_api.typesense import utils
def test_resolve_recordings_to_fw_track(mocker, factories):
artist = factories["music.Artist"](name="artist_name")
factories["music.Track"](
pk=1,
title="I Want It That Way",
artist=artist,
mbid="87dfa566-21c3-45ed-bc42-1d345b8563fa",
)
factories["music.Track"](
pk=2,
title="I Want It That Way",
artist=artist,
)
# def test_resolve_recordings_to_fw_track(mocker, factories):
# artist = factories["music.Artist"](name="artist_name")
# factories["music.Track"](
# pk=1,
# title="I Want It That Way",
# artist=artist,
# mbid="87dfa566-21c3-45ed-bc42-1d345b8563fa",
# )
# factories["music.Track"](
# pk=2,
# title="I Want It That Way",
# artist=artist,
# )
client = typesense.Client(
{
"api_key": "api_key",
"nodes": [{"host": "host", "port": "port", "protocol": "protocol"}],
"connection_timeout_seconds": 2,
}
)
with requests_mock.Mocker() as r_mocker:
mocker.patch.object(typesense, "Client", return_value=client)
mocker.patch.object(
typesense.client.ApiCall,
"post",
return_value=custom_factories.typesense_search_result,
)
r_mocker.get(
"protocol://host:port/collections/canonical_fw_data/documents/search",
json=custom_factories.typesense_search_result,
)
# client = typesense.Client(
# {
# "api_key": "api_key",
# "nodes": [{"host": "host", "port": "port", "protocol": "protocol"}],
# "connection_timeout_seconds": 2,
# }
# )
# with requests_mock.Mocker() as r_mocker:
# mocker.patch.object(typesense, "Client", return_value=client)
# mocker.patch.object(
# typesense.client.ApiCall,
# "post",
# return_value=custom_factories.typesense_search_result,
# )
# r_mocker.get(
# "protocol://host:port/collections/canonical_fw_data/documents/search",
# json=custom_factories.typesense_search_result,
# )
utils.resolve_recordings_to_fw_track(custom_factories.recording_list)
assert cache.get("87dfa566-21c3-45ed-bc42-1d345b8563fa") == "1"
# utils.resolve_recordings_to_fw_track(custom_factories.recording_list)
# assert cache.get("87dfa566-21c3-45ed-bc42-1d345b8563fa") == "1"

View File

@ -0,0 +1 @@
Fixed the sample Apache configuration

View File

@ -0,0 +1 @@
Fix build script for documentation to properly deploy swagger

View File

@ -0,0 +1 @@
Make trailing slashes optional for all endpoints

View File

@ -94,6 +94,11 @@ Define MEDIA_DIRECTORY_PATH ${FUNKWHALE_ROOT_PATH}/data/media
</Location>
Alias /front ${FUNKWHALE_ROOT_PATH}/front/dist
<Location "/assets">
ProxyPass "!"
</Location>
Alias /assets ${FUNKWHALE_ROOT_PATH}/front/dist/assets
<Location "/media">
ProxyPass "!"
</Location>

View File

@ -14,7 +14,7 @@ $(VENV):
$(MAKE) install
install:
poetry install
poetry install --no-root
poetry run pip install --no-deps --editable ../api
clean:
@ -47,10 +47,10 @@ locale-prune-untranslated: $(VENV)
# Swagger
SWAGGER_VERSION = 5.1.2
SWAGGER_RELEASE_URL = https://github.com/swagger-api/swagger-ui/archive/refs/tags/v$(SWAGGER_VERSION).tar.gz
SWAGGER_BUILD_DIR = swagger
SWAGGER_BUILD_DIR = "$(BUILD_DIR)/swagger"
swagger:
mkdir "$(SWAGGER_BUILD_DIR)"
mkdir -p "$(SWAGGER_BUILD_DIR)"
curl -sSL "$(SWAGGER_RELEASE_URL)" | \
tar --extract \
--gzip \

docs/poetry.lock (generated, 9 lines changed)
View File

@ -72,7 +72,7 @@ python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7
[[package]]
name = "django"
version = "3.2.23"
version = "3.2.24"
description = "A high-level Python Web framework that encourages rapid development and clean, pragmatic design."
category = "main"
optional = false
@ -576,7 +576,7 @@ testing = ["big-O", "jaraco.functools", "jaraco.itertools", "more-itertools", "p
[metadata]
lock-version = "1.1"
python-versions = "^3.8"
content-hash = "ece2cc9c958fc0ba6daf1213b80e849dc9357d5fd8bdd09d436dfbf605dccd7c"
content-hash = "c72701986feaf309054e359d54f1bb0508cb753c68e5ec4e4ad8d3c75b2af6f0"
[metadata.files]
alabaster = [
@ -696,8 +696,8 @@ colorama = [
{file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"},
]
django = [
{file = "Django-3.2.23-py3-none-any.whl", hash = "sha256:d48608d5f62f2c1e260986835db089fa3b79d6f58510881d316b8d88345ae6e1"},
{file = "Django-3.2.23.tar.gz", hash = "sha256:82968f3640e29ef4a773af2c28448f5f7a08d001c6ac05b32d02aeee6509508b"},
{file = "Django-3.2.24-py3-none-any.whl", hash = "sha256:5dd5b787c3ba39637610fe700f54bf158e33560ea0dba600c19921e7ff926ec5"},
{file = "Django-3.2.24.tar.gz", hash = "sha256:aaee9fb0fb4ebd4311520887ad2e33313d368846607f82a9a0ed461cd4c35b18"},
]
django-environ = [
{file = "django-environ-0.10.0.tar.gz", hash = "sha256:b3559a91439c9d774a9e0c1ced872364772c612cdf6dc919506a2b13f7a77225"},
@ -836,6 +836,7 @@ PyYAML = [
{file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"},
{file = "PyYAML-6.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28"},
{file = "PyYAML-6.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9"},
{file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a08c6f0fe150303c1c6b71ebcd7213c2858041a7e01975da3a99aed1e7a378ef"},
{file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0"},
{file = "PyYAML-6.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4"},
{file = "PyYAML-6.0.1-cp312-cp312-win32.whl", hash = "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54"},

View File

@ -16,12 +16,12 @@ sphinx-rtd-theme = "==1.1.1"
sphinxcontrib-mermaid = "0.7.1"
myst-parser = "1.0.0"
django = "==3.2.23"
django = "==3.2.24"
django-environ = "==0.10.0"
[tool.poetry.group.dev.dependencies]
sphinx-autobuild = "2021.3.14"
[build-system]
requires = ["poetry-core>=1.0.0"]
requires = ["poetry-core==1.8.1"]
build-backend = "poetry.core.masonry.api"

View File

@ -39,7 +39,7 @@
"moment": "2.29.4",
"showdown": "2.1.0",
"stacktrace-js": "2.0.2",
"standardized-audio-context": "25.3.60",
"standardized-audio-context": "25.3.64",
"text-clipper": "2.2.0",
"transliteration": "2.3.5",
"universal-cookie": "4.0.4",

File diff suppressed because it is too large

View File

@ -12,7 +12,7 @@ python = "^3.8"
pytest = "7.3.1"
[build-system]
requires = ["poetry-core>=1.0.0"]
requires = ["poetry-core==1.8.1"]
build-backend = "poetry.core.masonry.api"
[tool.pytest.ini_options]