Skip to content
This repository was archived by the owner on Sep 1, 2025. It is now read-only.
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
24 changes: 15 additions & 9 deletions AzureAD.py
Original file line number Diff line number Diff line change
Expand Up @@ -31,18 +31,24 @@ def __init__(self, tenantId: str) -> None:



async def getCWIDFromEmail(self, usernames: list[str]) -> list[tuple[str, str]]:
    """
    Resolve a batch of user principal names (emails) to CWIDs via Microsoft Graph.

    :param usernames: user principal names to look up
    :return: list of (userPrincipalName, employeeId) tuples for every enabled
             account Graph returned; usernames with no match are simply absent
    """
    cwidMap: list[tuple[str, str]] = []

    # Graph's `in` filter only accepts a limited number of operands,
    # so query in batches of 14 usernames at a time.
    for i in range(0, len(usernames), 14):
        # FIX: build the quoted list outside the f-string — backslash escapes
        # inside an f-string expression are a SyntaxError before Python 3.12.
        quotedBatch = "','".join(usernames[i:i + 14])
        query = UsersRequestBuilder.UsersRequestBuilderGetQueryParameters(
            select=["employeeId", "userPrincipalName"],
            filter=f"userPrincipalName in ['{quotedBatch}'] and accountEnabled eq true",
        )

        requestConfig = UsersRequestBuilder.UsersRequestBuilderGetRequestConfiguration(query_parameters=query)
        userCwids = await self.client.users.get(requestConfig)

        if userCwids is None:
            continue

        cwidMap.extend((val.user_principal_name, val.employee_id) for val in userCwids.value)

    return cwidMap

async def getEmailFromCWID(self, cwid: str) -> str:
query = UsersRequestBuilder.UsersRequestBuilderGetQueryParameters(
Expand Down
69 changes: 68 additions & 1 deletion FileHelpers/csvLoaders.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,8 @@
# The biggest change that needs to be made is to remap multipasses to cwids

import csv
import re
import sys
from typing import List, Dict

import pandas as pd
from FileHelpers import fileHelper
Expand All @@ -12,6 +15,7 @@
# Grace Period of 15 minutes
GRADESCOPE_GRACE_PERIOD = 15

csv.field_size_limit(sys.maxsize)

def loadCSV(_filename: str, promptIfError: bool = False, directoriesToCheck: list[str] = None):
"""
Expand Down Expand Up @@ -115,6 +119,69 @@ def loadGradescope(_filename):
print("Done.")
return gradescopeDF

def extractGroupFromPL(group: str):
    """
    Split a PrairieLearn "Usernames" cell such as '["alice","bob"]' into a list of names.

    Removes the surrounding brackets and quote characters, then splits on commas.
    NOTE(review): members are not whitespace-stripped — a cell like '["a", "b"]'
    would yield ' b' with a leading space; confirm the export format has no spaces.
    """
    cleaned = re.sub(r"[\[\"\]]", "", group)
    return cleaned.split(",")

def convertGroupSubmissionToIndividualSubmission(header: List[str], data: List[List[str]]):
    """
    Expand each PrairieLearn group-submission row into one row per group member.

    :param header: the CSV header row; must contain "Usernames",
                   "Submission date", and "Question points" columns
    :param data: the remaining CSV rows (one per group submission)
    :return: rows of the form [username, submission date, points (float)]
    """
    memberCol = header.index("Usernames")
    dateCol = header.index("Submission date")
    pointsCol = header.index("Question points")

    return [
        [member, row[dateCol], float(row[pointsCol])]
        for row in data
        for member in extractGroupFromPL(row[memberCol])
    ]

def parseLinePL(students: Dict[str, List[str]], line: List[str]):
    """
    Fold one normalized submission row into the per-student accumulator.

    Each accumulator entry is [user id, latest submission date, total points].
    A repeat submission by the same user overwrites the date and adds the points.

    :param students: accumulator keyed by user id; mutated in place
    :param line: [user id, submission date, points] as produced by
                 convertGroupSubmissionToIndividualSubmission
    """
    userId, submissionDate, points = line[0], line[1], line[2]

    if not userId:
        # empty group — nobody to credit
        return

    existing = students.get(userId)
    if existing is None:
        students[userId] = [userId, submissionDate, points]
    else:
        existing[1] = submissionDate
        existing[2] += points

def loadPrairieLearn(filename):
    """
    Load a PrairieLearn group-submission CSV and produce a gradesheet DataFrame.

    Group rows are expanded to one row per member, then scores are summed per
    student across their submissions.

    :param filename: path to the PrairieLearn CSV export
    :return: DataFrame with 'email', 'hours_late', 'Total Score', 'Status',
             and 'lateness_comment' columns; empty DataFrame if the file is
             missing or contains no rows
    """
    rawRows = []
    try:
        with open(filename) as csvFile:
            rawRows = list(csv.reader(csvFile, quotechar='"'))
    except FileNotFoundError:
        return pd.DataFrame()

    # guard: an empty export has no header row to index into
    if not rawRows:
        return pd.DataFrame()

    normalized = convertGroupSubmissionToIndividualSubmission(rawRows[0], rawRows[1:])

    scores = {}

    # FIX: previously iterated normalized[1:], silently dropping the first
    # student's row — the header was already stripped during normalization.
    for line in normalized:
        parseLinePL(scores, line)

    return pd.DataFrame({
        'email': [value[0] for value in scores.values()],
        'hours_late': [0] * len(scores),
        'Total Score': [value[2] for value in scores.values()],
        'Status': ['Graded'] * len(scores),
        'lateness_comment': [''] * len(scores),
    })

def loadRunestone(_filename, assignment: str):
"""
Expand Down
32 changes: 21 additions & 11 deletions Grade/gradesheets.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,12 +11,16 @@
This file **non-destructively** creates gradesheets. The generated gradesheets still have to be 'selected' in Canvas
in order to be scored and posted with everything else.
"""
from typing import List

import pandas as pd

from Bartik.Bartik import Bartik
from AzureAD import AzureAD

async def convertBartikToGradesheet(_azure: AzureAD, _bartik: Bartik, _students: pd.DataFrame, _assignment: str, _maxPoints: float, _requiredProbems: int) -> pd.DataFrame:

async def convertBartikToGradesheet(_azure: AzureAD, _bartik: Bartik, _students: pd.DataFrame, _assignment: str,
_maxPoints: float, _requiredProbems: int) -> pd.DataFrame:
bartikGradesheet: pd.DataFrame = pd.DataFrame()
bartikGradesheet['multipass'] = ""
bartikGradesheet['Total Score'] = ""
Expand All @@ -30,12 +34,11 @@ async def convertBartikToGradesheet(_azure: AzureAD, _bartik: Bartik, _students:
print(f"Now grading {row['name']} ({counter}/{len(_students)})...", end="")

studentEmail: str = await _azure.getEmailFromCWID(row['sis_id'])

if studentEmail == "":
print(f"Failed to map email for {row['name']}")
continue


missing: bool = False
score: float = 0

Expand All @@ -46,16 +49,15 @@ async def convertBartikToGradesheet(_azure: AzureAD, _bartik: Bartik, _students:
print(f"Missing")

bartikGradesheet = pd.concat([bartikGradesheet, pd.DataFrame(
{
'multipass': row['sis_id'],
'Total Score': score,
'lateness_comment': "",
}, index=[0]
)], ignore_index=True)
{
'multipass': row['sis_id'],
'Total Score': score,
'lateness_comment': "",
}, index=[0]
)], ignore_index=True)

if not missing:
print("Done")


_bartik.closeSession()
return bartikGradesheet
Expand Down Expand Up @@ -97,7 +99,6 @@ def createGradesheetForPassFailAssignment(_passFailAssignment: pd.DataFrame, _st
checkProofOfAttendance: bool = False,
proofOfAttendanceColumn: (str, None) = None) \
-> pd.DataFrame:

if proofOfAttendanceColumn:
proofOfAttendanceColumn = proofOfAttendanceColumn.replace(' ', '_')

Expand Down Expand Up @@ -141,4 +142,13 @@ def createGradesheetForPassFailAssignment(_passFailAssignment: pd.DataFrame, _st
return _passFailAssignment


async def finalizeGradesheet(azure: "AzureAD", assignment: pd.DataFrame) -> pd.DataFrame:
    """
    Fill in the 'multipass' (CWID) column of a gradesheet by resolving emails in bulk.

    :param azure: AzureAD client used to map emails to CWIDs
    :param assignment: gradesheet containing an 'email' column; mutated in place
    :return: the same DataFrame with a populated 'multipass' column
    """
    emails = assignment['email'].tolist()

    emailsWithCwids = await azure.getCWIDFromEmail(emails)

    # FIX: build the lookup once instead of scanning the pair list for every row
    # (was O(n^2)), and default to '' instead of raising IndexError when Azure
    # returned no match for an email (e.g. disabled accounts are filtered out).
    cwidByEmail = dict(emailsWithCwids)

    assignment['multipass'] = [cwidByEmail.get(email, '') for email in assignment['email']]

    return assignment
4 changes: 2 additions & 2 deletions UI/standardGrading.py
Original file line number Diff line number Diff line change
Expand Up @@ -20,8 +20,8 @@ async def standardGrading(**kwargs):
gradesheetsToGrade: dict[int, pd.DataFrame] = uiHelpers.setupGradescopeGrades(kwargs['canvas'])
elif choice == 2:
gradesheetsToGrade: dict[int, pd.DataFrame] = uiHelpers.setupRunestoneGrades(kwargs['canvas'])
elif choice == 3:
return NotImplementedError # TODO add PL support
else:
gradesheetsToGrade: dict[int, pd.DataFrame] = await uiHelpers.setupPLGrades(kwargs['canvas'], kwargs['azure'])

specialCasesDF = uiHelpers.setupSpecialCases()

Expand Down
22 changes: 21 additions & 1 deletion UI/uiHelpers.py
Original file line number Diff line number Diff line change
@@ -1,9 +1,12 @@
"""
"""
import pandas as pd
from FileHelpers.csvLoaders import loadGradescope, loadRunestone

from AzureAD import AzureAD
from FileHelpers.csvLoaders import loadGradescope, loadRunestone, loadPrairieLearn
from FileHelpers.excelLoaders import loadSpecialCases, loadPassFailAssignment
from Canvas import Canvas
from Grade.gradesheets import finalizeGradesheet


def getUserInput(allowedUserInput: str = None, allowedLowerRange: int = None, allowedUpperRange: int = None):
Expand Down Expand Up @@ -93,6 +96,23 @@ def setupGradescopeGrades(_canvas: Canvas) -> dict[int, pd.DataFrame]:

return assignmentMap

async def setupPLGrades(canvas: Canvas, azure: AzureAD) -> dict[int, pd.DataFrame]:
    """
    Prompt for a PrairieLearn CSV per selected assignment and build gradesheets.

    :param canvas: Canvas client used to pick the assignments to grade
    :param azure: AzureAD client used to map emails to CWIDs
    :return: map of Canvas assignment id -> finalized gradesheet DataFrame;
             empty map if no assignments were selected or a file failed to load
    """
    # the IDs will always be unique per course - using those over the common names
    selectedAssignments: pd.DataFrame = canvas.getAssignmentsToGrade()
    assignmentMap: dict[int, pd.DataFrame] = {}
    if selectedAssignments is None:
        return assignmentMap

    for _, row in selectedAssignments.iterrows():
        print(f"Enter path to pl grades for {row['common_name']}")
        path = getUserInput(allowedUserInput="./path/to/pl/grades.csv")
        rawDf: pd.DataFrame = loadPrairieLearn(path)
        # FIX: check for a failed load *before* finalizing — finalizeGradesheet
        # indexes the 'email' column, which the empty DataFrame returned on a
        # load failure does not have, so the old order raised KeyError instead
        # of reporting the bad path.
        if rawDf.empty:
            print(f"Failed to load file '{path}'")
            # TODO handle this case more elegantly
            return {}
        assignmentMap[row['id']] = await finalizeGradesheet(azure, rawDf)

    return assignmentMap

def setupRunestoneGrades(_canvas: Canvas) -> dict[int, pd.DataFrame]:
# the IDs will always be unique per course - using those over the common names
Expand Down