Skip to content

Commit

Permalink
Updated list and calendar views
Browse files Browse the repository at this point in the history
  • Loading branch information
aropan committed Jul 27, 2024
1 parent 423f8aa commit b93933f
Show file tree
Hide file tree
Showing 368 changed files with 36,541 additions and 24,794 deletions.
3 changes: 2 additions & 1 deletion src/clist/management/commands/parse_problems.py
Original file line number Diff line number Diff line change
Expand Up @@ -64,6 +64,7 @@ def handle(self, *args, **options):
contests = contests.order_by('-end_time')

for contest in tqdm(contests, total=contests.count(), desc='Contests'):
cache = dict()
modules = dict()
resource = contest.resource
if resource not in modules:
Expand All @@ -73,7 +74,7 @@ def handle(self, *args, **options):
problems = deepcopy(contest.info.get('problems'))
updated = False
for problem in contest.problems_list:
info = module.get_problem_info(problem)
info = module.get_problem_info(problem, contest=contest, cache=cache)
if info is None:
continue
problem.update(info)
Expand Down
13 changes: 8 additions & 5 deletions src/clist/models.py
Original file line number Diff line number Diff line change
Expand Up @@ -104,11 +104,14 @@ class Resource(BaseModel):
statistics_fields = models.JSONField(default=dict, blank=True)
skip_for_contests_chart = models.BooleanField(default=False)

RATING_FIELDS = ('old_rating', 'new_rating', 'rating', 'rating_perf', 'performance', 'raw_rating',
'OldRating', 'Rating', 'NewRating', 'Performance',
'predicted_old_rating', 'predicted_new_rating', 'predicted_rating_perf', 'predicted_raw_rating',
'rating_prediction_old_rating', 'rating_prediction_new_rating', 'rating_prediction_rating_perf',
'rating_prediction_raw_rating')
RATING_FIELDS = (
'old_rating', 'new_rating', 'rating', 'rating_perf', 'performance', 'raw_rating',
'OldRating', 'Rating', 'NewRating', 'Performance',
'predicted_old_rating', 'predicted_new_rating', 'predicted_rating_perf', 'predicted_raw_rating',
'rating_prediction_old_rating', 'rating_prediction_new_rating', 'rating_prediction_rating_perf',
'rating_prediction_raw_rating',
'native_rating',
)

event_logs = GenericRelation('logify.EventLog', related_query_name='resource')

Expand Down
17 changes: 13 additions & 4 deletions src/clist/templatetags/extras.py
Original file line number Diff line number Diff line change
Expand Up @@ -1006,12 +1006,21 @@ def as_number(value, force=False):
return value


@register.filter
def title_field(value):
def _title_field(value):
value = re.sub('([A-Z]+)', r'_\1', value)
values = re.split('_+', value)
value = ' '.join([v.title() for v in values])
return value
values = [v.title() for v in values]
return values


@register.filter
def title_field(value):
    """Template filter: render a field name as space-separated title-cased words."""
    words = _title_field(value)
    return ' '.join(words)


@register.filter
def title_field_div(value, split=False):
    """Template filter: render each title-cased word of a field name in its own <div>."""
    # NOTE(review): `split` is accepted but never used — confirm whether any
    # template relies on passing it before removing.
    divs = [f'<div>{f}</div>' for f in _title_field(value)]
    return mark_safe(''.join(divs))


def normalize_field(k):
Expand Down
3 changes: 2 additions & 1 deletion src/clist/views.py
Original file line number Diff line number Diff line change
Expand Up @@ -202,6 +202,7 @@ def get_events(request):
'hr_duration': contest.hr_duration,
'color': color,
'icon': contest.resource.icon,
'allDay': contest.full_duration >= timedelta(days=1),
}
if coder:
c['favorite'] = contest.is_favorite
Expand Down Expand Up @@ -680,7 +681,7 @@ def resource(request, host, template='resource.html', extra_context=None):
'url': reverse('ranking:standings_list') + f'?resource={resource.pk}',
},
'coming': {
'contests': contests.filter(start_time__gt=now).order_by('start_time'),
'contests': contests.filter(start_time__gt=now).order_by('start_time', 'id'),
'field': 'start_time',
'url': reverse('clist:main') + f'?resource={resource.pk}&view=list&group=no&status=coming',
},
Expand Down
1 change: 1 addition & 0 deletions src/pyclist/settings.py
Original file line number Diff line number Diff line change
Expand Up @@ -686,6 +686,7 @@ def show_toolbar_callback(request):
'chart': '<i class="fa-fw fas fa-chart-bar"></i>',
'ghost': '<i class="fs-fw fas fa-ghost"></i>',
'top': '<i class="fas fa-list-ol"></i>',
'coders': '<i class="fas fa-users"></i>',
'accounts': '<i class="fa-regular fa-rectangle-list"></i>',
'problems': '<i class="fa-solid fa-list-check"></i>',
'submissions': '<i class="fa-solid fa-bars"></i>',
Expand Down
5 changes: 3 additions & 2 deletions src/ranking/admin.py
Original file line number Diff line number Diff line change
Expand Up @@ -139,9 +139,10 @@ def _adv(self, obj):

@admin_register(Stage)
class StageAdmin(BaseModelAdmin):
list_display = ['contest', 'filter_params', 'score_params']
search_fields = ['contest__title', 'contest__resource__host']
list_display = ['contest', 'filter_params']
search_fields = ['contest__title', 'contest__resource__host', 'filter_params', 'score_params']
list_filter = ['contest__host']
ordering = ['-contest__start_time']

def parse_stage(self, request, queryset):
for stage in queryset:
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -118,6 +118,8 @@ def get_old_ratings(contest):
contest__start_time__lt=contest.start_time,
contest__resource=resource,
account__in=accounts,
).exclude(
contest__is_rated=False,
).annotate(
latest_rating=Case(
When(**{f'addition__{rating_field}__isnull': False}, then=FloatJSONF(f'addition__{rating_field}')),
Expand All @@ -133,6 +135,7 @@ def get_old_ratings(contest):
).distinct('account').values('member', 'latest_rating')

n_contests_filter = Q(contest__start_time__lt=contest.start_time, skip_in_stats=False)
n_contests_filter &= Q(contest__is_rated=True) | Q(contest__is_rated__isnull=True)
statistics = Statistics.objects.filter(
contest=contest,
account__in=accounts,
Expand Down
142 changes: 101 additions & 41 deletions src/ranking/management/modules/codechef.py
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,7 @@
from first import first
from ratelimiter import RateLimiter

from clist.templatetags.extras import as_number, is_improved_solution
from clist.templatetags.extras import as_number, is_improved_solution, slug
from ranking.management.modules import conf
from ranking.management.modules.common import LOG, REQ, BaseModule, FailOnGetResponse, parsed_table
from ranking.management.modules.excepts import ExceptionParseStandings
Expand All @@ -45,6 +45,19 @@ def __init__(self, **kwargs):
self._username = conf.CODECHEF_USERNAME
self._password = conf.CODECHEF_PASSWORD

@staticmethod
def _get_headers_with_csrf_token(key):
    """Fetch the standings page for *key* and build AJAX headers with its CSRF token.

    Tries each known token location in turn; returns None (implicitly) when
    no token is found on the page.
    """
    page = REQ.get(Statistic.STANDINGS_URL_FORMAT_.format(key=key))
    token_regexes = (
        r'window.csrfToken\s*=\s*"([^"]*)"',
        '<input[^>]*name="csrfToken"[^>]*id="edit-csrfToken"[^>]*value="([^"]*)"',
    )
    for token_regex in token_regexes:
        match = re.search(token_regex, page)
        if match is None:
            continue
        return {'x-csrf-token': match.group(1), 'x-requested-with': 'XMLHttpRequest'}

def get_standings(self, users=None, statistics=None, **kwargs):
# REQ.get('https://www.codechef.com/')

Expand Down Expand Up @@ -95,19 +108,9 @@ def get_standings(self, users=None, statistics=None, **kwargs):
writers = defaultdict(int)

for key, contest_info in contest_infos.items():
standings_url = self.STANDINGS_URL_FORMAT_.format(key=key)
page = REQ.get(standings_url)
for regex in (
r'window.csrfToken\s*=\s*"([^"]*)"',
'<input[^>]*name="csrfToken"[^>]*id="edit-csrfToken"[^>]*value="([^"]*)"',
):
match = re.search(regex, page)
if match:
break
if not match:
headers = Statistic._get_headers_with_csrf_token(key)
if not headers:
raise ExceptionParseStandings('not found csrf token')
csrf_token = match.group(1)
headers = {'x-csrf-token': csrf_token, 'x-requested-with': 'XMLHttpRequest'}

n_page = 0
per_page = 150
Expand Down Expand Up @@ -168,32 +171,13 @@ def get_standings(self, users=None, statistics=None, **kwargs):
}
d.append(problem_info)

if code not in problems_data:
problem_url = self.API_PROBLEM_URL_FORMAT_.format(key='PRACTICE', code=code)
problem_data = REQ.get(problem_url,
headers=headers,
return_json=True,
ignore_codes={404, 403})

if problem_data.get('status') == 'error':
problem_info['url'] = self.CONTEST_PROBLEM_URL_FORMAT_.format(key=key, code=code)
problem_url = self.API_PROBLEM_URL_FORMAT_.format(key=key, code=code)
problem_data = REQ.get(problem_url,
headers=headers,
return_json=True,
ignore_codes={404, 403})

writer = problem_data.get('problem_author')
if writer:
writers[writer] += 1
problems_data[code]['writers'] = [writer]

tags = problem_data.get('tags')
if tags:
matches = re.findall('<a[^>]*>([^<]+)</a>', tags)
problems_data[code]['tags'] = matches

problem_info.update(problems_data[code])
more_problem_info = self.get_problem_info(problem_info,
contest=self.contest,
cache=problems_data)
problem_info.update(more_problem_info)

for writer in problem_info.get('writers', []):
writers[writer] += 1

n_total_page = data['availablePages']
pbar = tqdm.tqdm(total=n_total_page * len(urls))
Expand All @@ -213,7 +197,10 @@ def get_standings(self, users=None, statistics=None, **kwargs):
row['solving'] = d.pop('score')
for k in 'time', 'total_time':
if k in d:
row['time'] = d.pop(k)
t = d.pop(k)
if t.startswith('-'):
continue
row['time'] = t
break

stats = (statistics or {}).get(handle, {})
Expand Down Expand Up @@ -251,7 +238,11 @@ def get_standings(self, users=None, statistics=None, **kwargs):

problem = problems.setdefault(k, {})
if k in unscored_problems:
problem = problem.setdefault('upsolving', {})
problem_upsolving = problem.setdefault('upsolving', {})
if not isinstance(problem_upsolving, dict):
problem_upsolving = {'result': problem_upsolving}
problem['upsolving'] = problem_upsolving
problem = problem_upsolving
if not is_improved_solution(v, problem):
continue
problem.update(v)
Expand Down Expand Up @@ -615,3 +606,72 @@ def process_pagination():
account.info['submissions_'] = submissions_info
account.save(update_fields=['info', 'last_submission'])
return ret

@staticmethod
def get_problem_info(problem, contest, cache, **kwargs):
    """Fetch extra metadata (writers, tags, rating, limits, ...) for a problem.

    :param problem: dict with at least a 'code' key identifying the problem
    :param contest: contest object whose `key` is used as an API fallback
    :param cache: per-run dict keyed by problem code; repeated calls for the
        same code return the cached result without re-fetching
    :return: dict of extra problem fields (possibly empty)
    """
    code = problem['code']
    if code in cache:
        return cache[code]
    problem_info = cache.setdefault(code, {})

    problem_url = Statistic.API_PROBLEM_URL_FORMAT_.format(key='PRACTICE', code=code)
    problem_data = REQ.get(problem_url, return_json=True, ignore_codes={404, 403})

    if problem_data.get('status') == 'error':
        # Not available in the practice section: record the contest-scoped URL
        # and retry against the contest-scoped API.
        problem_info['url'] = Statistic.CONTEST_PROBLEM_URL_FORMAT_.format(key=contest.key, code=code)
        problem_url = Statistic.API_PROBLEM_URL_FORMAT_.format(key=contest.key, code=code)
        problem_data = REQ.get(problem_url, return_json=True, ignore_codes={404, 403})

    # Drop bulky payload fields that are never used.
    for bulky_field in ('body', 'problemComponents', 'practice_special_banner',
                        'problem_display_authors_html_handle'):
        problem_data.pop(bulky_field, None)

    problem_writers = []
    authors = problem_data.pop('problem_display_authors', None)
    if authors:
        problem_writers.extend(authors)
    author = problem_data.pop('problem_author', None)
    if author and author not in problem_writers:
        problem_writers.append(author)
    if problem_writers:
        problem_info['writers'] = problem_writers

    problem_tags = []
    for tags_field in ('tags', 'user_tags', 'computed_tags'):
        tags = problem_data.pop(tags_field, None)
        if not tags:
            continue
        if isinstance(tags, str):
            # Tags may arrive as rendered HTML links; extract the link texts.
            tags = re.findall('<a[^>]*>([^<]+)</a>', tags)
        problem_tags.extend(tags)
    if problem_tags:
        problem_info['tags'] = [slug(t) for t in problem_tags]

    languages_supported = problem_data.pop('languages_supported', None)
    if languages_supported:
        if isinstance(languages_supported, str):
            languages_supported = languages_supported.split(', ')
        problem_info['languages_supported'] = languages_supported

    native_rating = as_number(problem_data.pop('difficulty_rating', None), force=True)
    if native_rating and native_rating > 0:
        problem_info['native_rating'] = native_rating

    # Copy remaining simple fields verbatim when present and truthy.
    # (The original iterated parenthesized strings like `('hints')` with a dead
    # isinstance-tuple branch — these were plain strings, so the rename
    # machinery never fired; a flat tuple of names is equivalent and clearer.)
    for field in ('hints', 'best_tag', 'editorial_url', 'video_editorial_url',
                  'max_timelimit', 'source_sizelimit'):
        value = problem_data.pop(field, None)
        if value:
            problem_info[field] = value

    return problem_info
2 changes: 1 addition & 1 deletion src/ranking/management/modules/common/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -140,7 +140,7 @@ def update_submissions(account, resource):
raise NotImplementedError()

@staticmethod
def get_problem_info(problem):
def get_problem_info(problem, **kwargs):
raise NotImplementedError()


Expand Down
4 changes: 2 additions & 2 deletions src/ranking/management/modules/leetcode.py
Original file line number Diff line number Diff line change
Expand Up @@ -36,7 +36,7 @@ class Statistic(BaseModule):
RANKING_URL_FORMAT_ = '{url}/ranking'
API_SUBMISSION_URL_FORMAT_ = 'https://leetcode{}/api/submissions/{}/'
STATE_FILE = os.path.join(os.path.dirname(__file__), '.leetcode.yaml')
DOMAINS = {'': '.com', 'us': '.com', 'cn': '.cn'}
DOMAINS = {'': '.com', 'us': '.com', 'cn': '.cn', 'ly': '.com'}
API_SUBMISSIONS_URL_FORMAT_ = 'https://leetcode.com/api/submissions/?offset={}&limit={}'

def __init__(self, **kwargs):
Expand Down Expand Up @@ -1011,7 +1011,7 @@ def get_field(*fields, raise_not_found=True):
return ret

@staticmethod
def get_problem_info(problem):
def get_problem_info(problem, **kwargs):
slug = get_item(problem, 'slug')
params = {
'operationName': 'questionData',
Expand Down
2 changes: 1 addition & 1 deletion src/ranking/models.py
Original file line number Diff line number Diff line change
Expand Up @@ -909,7 +909,7 @@ def get_placing(placing, stat):
problem['status'] = 'W'

if self.score_params.get('writers_proportionally_score'):
n_contests = len(contests)
n_contests = sum(contest.start_time < timezone_now for contest in contests)
for account in writers:
row = results[account]
if n_contests == row['writer'] or 'score' not in row:
Expand Down
Loading

0 comments on commit b93933f

Please sign in to comment.