Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 3 additions & 3 deletions .github/workflows/main.yml
Original file line number Diff line number Diff line change
Expand Up @@ -56,21 +56,21 @@ jobs:
with:
fetch-depth: 1
- name: "Caching (for Linux)"
uses: actions/cache@v1
uses: actions/cache@v4
if: startsWith(runner.os, 'linux')
with:
path: ~/.cache/pip
key: ${{ runner.os }}-pip-${{ hashFiles('**/requirements.txt') }}
restore-keys: ${{ runner.os }}-pip-
- name: "Caching (for Macintosh)"
uses: actions/cache@v1
uses: actions/cache@v4
if: startsWith(runner.os, 'macos')
with:
path: ~/Library/Caches/pip
key: ${{ runner.os }}-pip-${{ hashFiles('**/requirements.txt') }}
restore-keys: ${{ runner.os }}-pip-
- name: "Caching (for Windows)"
uses: actions/cache@v1
uses: actions/cache@v4
if: startsWith(runner.os, 'windows')
with:
path: ~\AppData\Local\pip\Cache
Expand Down
1 change: 1 addition & 0 deletions client/api/assignment.py
Original file line number Diff line number Diff line change
Expand Up @@ -263,6 +263,7 @@ def server_url(self):
"grading",
"analytics",
"help",
"followup",
"autostyle",
"collaborate",
"hinting",
Expand Down
66 changes: 66 additions & 0 deletions client/protocols/common/models.py
Original file line number Diff line number Diff line change
Expand Up @@ -20,3 +20,69 @@ def run(self, messages):
"""
raise NotImplementedError

import os
import pickle
import hmac
class ResearchProtocol(Protocol):
    """Shared helpers for the 61A-bot research protocols.

    Mixin base for protocols that send de-identified student data to the
    61A-bot research backend (research project with larynqi@, zamfi@,
    norouzi@, denero@).  Provides consent collection/caching, HMAC signing
    of the cached consent value, and a helper that inspects grading output
    to find the first failing test.
    """

    # Research backend and the shared key sent with requests to it.
    SERVER = 'https://61a-bot-backend.zamfi.net'
    SERVER_KEY = 'jfv97pd8ogybhilq3;orfuwyhiulae'
    CS61A_ENDPOINT = 'cs61a'
    C88C_ENDPOINT = 'c88c'
    CS61A_ID = '61a'
    C88C_ID = '88c'
    UNKNOWN_COURSE = '<unknown course>'

    # Consent prompting: answers in NO_CONSENT_OPTIONS mean "no"; any other
    # input (including just pressing return) is treated as consent.
    GET_CONSENT = True
    CONSENT_CACHE = '.ok_consent'
    NO_CONSENT_OPTIONS = {"n", "no", "0", "-1", }
    CONSENT_MESSAGE = "Can we collect your de-identified data for research directed by Prof. Narges Norouzi (EECS faculty member unaffiliated with this course)? Your consent is voluntary and does not affect your ability to use this tool or your course grade. For more information visit https://cs61a.org/articles/61a-bot\n\nYou can change your response at any time by running `python3 ok --consent`."

    def _mac(self, key, value):
        """Return a hex SHA-512 HMAC of ``repr(value)`` keyed by ``key``."""
        mac = hmac.new(key.encode('utf-8'), digestmod='sha512')
        mac.update(repr(value).encode('utf-8'))
        return mac.hexdigest()

    def _get_consent(self, email):
        """Return whether the student has consented to data collection.

        The answer is cached in CONSENT_CACHE alongside an HMAC keyed by the
        student's email, so a tampered or unreadable cache is discarded and
        the student is re-prompted.  Passing ``--consent`` on the command
        line also forces a fresh prompt.  Always False when GET_CONSENT is
        disabled on the class.
        """
        if not self.GET_CONSENT:
            return False
        if self.CONSENT_CACHE in os.listdir() and not self.args.consent:
            try:
                with open(self.CONSENT_CACHE, 'rb') as f:
                    data = pickle.load(f)
                # Reject the cache unless its MAC matches the stored answer.
                if not hmac.compare_digest(data.get('mac'), self._mac(email, data.get('consent'))):
                    os.remove(self.CONSENT_CACHE)
                    return self._get_consent(email)
                return data.get('consent')
            except Exception:
                # Corrupt/unreadable cache (bad pickle, missing keys, ...):
                # discard it and re-prompt.  Narrowed from a bare `except:`,
                # which also swallowed KeyboardInterrupt/SystemExit.
                os.remove(self.CONSENT_CACHE)
                return self._get_consent(email)
        print(self.CONSENT_MESSAGE)
        res = input("\n(Y/n)? ").lower()
        consent = res not in self.NO_CONSENT_OPTIONS
        if consent:
            print("\nYou have consented.\n")
        else:
            print("\nYou have not consented.\n")
        with open(self.CONSENT_CACHE, 'wb') as f:
            pickle.dump({'consent': consent, 'mac': self._mac(email, consent)}, f, protocol=pickle.HIGHEST_PROTOCOL)
        return consent

    def _check_solved(self, messages):
        """Summarize grading output for the specified tests.

        Returns ``{'failed': bool, 'active_function': name}`` where the
        active function is the first test with a recorded failure, or the
        last specified test when none failed.  When no tests are specified,
        'active_function' is None (previously this raised IndexError on
        ``tests[-1]``).
        """
        tests = self.assignment.specified_tests
        grading_analytics = messages.get('grading', {})
        # Guard: an empty test list previously crashed on tests[-1].
        active_function = tests[-1].name if tests else None
        for test in tests:
            name = test.name
            if name in grading_analytics and grading_analytics[name]['failed'] > 0:
                return {
                    'failed': True,
                    'active_function': name
                }
        return {
            'failed': False,
            'active_function': active_function
        }
84 changes: 84 additions & 0 deletions client/protocols/followup.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,84 @@
"""61A-bot.

Spring 2025 feature with larynqi@, zamfi@, norouzi@, denero@
"""

from client.protocols.common import models
from client.utils import config as config_utils
from client.utils import format

import os
import logging
import json

from client.utils.printer import print_error

from client.protocols.unlock import UnlockProtocol

log = logging.getLogger(__name__)

class FollowupProtocol(models.ResearchProtocol, UnlockProtocol):
    """Ask ungraded follow-up questions after a student solves a problem.

    Runs only when 'followup' is enabled in the assignment config and the
    currently active function has no failing tests.  Pending questions are
    read from FOLLOWUPS_FILE and the student's multiple-choice answers are
    written back to the same file.
    """

    PROTOCOL_NAME = 'followup'
    FOLLOWUP_ENDPOINT = models.ResearchProtocol.SERVER + '/questions'
    GET_CONSENT = True
    FOLLOWUPS_FILE = 'followups.json'

    def run(self, messages):
        """Present any unanswered follow-ups for the just-solved function."""
        config = config_utils._get_config(self.args.config)

        if self.PROTOCOL_NAME not in config.get('protocols', []):
            return

        check_solved = self._check_solved(messages)
        failed, active_function = check_solved['failed'], check_solved['active_function']
        if failed:
            return

        if self.FOLLOWUPS_FILE not in os.listdir():
            followup_data = []
        else:
            # Context-managed read: the original `open(...).read()` leaked
            # the file handle.
            with open(self.FOLLOWUPS_FILE) as f:
                followup_data = json.load(f)

        # Queue the active function's questions that have no response yet.
        followup_queue = []
        for entry in followup_data:
            if entry['name'] == active_function:
                for followup in entry['followups']:
                    if not followup['response']:
                        followup_queue.append(followup)

        if followup_queue:
            format.print_line('~')
            print('Follow-up questions')
            print()

            # PROMPT and EXIT_INPUTS are inherited from UnlockProtocol.
            print('At each "{}", type what you think the best answer is. YOUR ANSWERS WILL NOT BE GRADED'.format(
                self.PROMPT))
            print('Type {} to quit'.format(self.EXIT_INPUTS[0]))
            print()

            for followup in followup_queue:
                response = self._ask_followup(followup)
                followup['response'] = response

            # The queued dicts alias entries inside followup_data, so the
            # recorded answers are persisted by rewriting the whole file.
            with open(self.FOLLOWUPS_FILE, 'w') as f:
                f.write(json.dumps(followup_data, indent=2))

    def _ask_followup(self, followup):
        """Ask one multiple-choice question and return the uppercase answer.

        Returns None (implicitly) when the student enters an exit input,
        leaving the question marked unanswered for a later run.
        """
        question, choices = followup['question'], followup['choices']
        print(question)
        print()
        for c in choices:
            print(c)
        print()
        # Accept the choice letter in either case, or an exit input.
        valid_responses = [chr(ord('A') + i) for i in range(len(choices))] + [chr(ord('a') + i) for i in range(len(choices))] + list(self.EXIT_INPUTS)
        response = None
        while response not in valid_responses:
            response = input(self.PROMPT)
            if response not in valid_responses:
                print("-- Please select a provided option. --\n")

        if response not in self.EXIT_INPUTS:
            # NOTE(review): looks like leftover debug output shown to the
            # student — confirm whether this print should remain.
            print(f'LOG: received {response.upper()} from student')
            return response.upper()

protocol = FollowupProtocol
65 changes: 9 additions & 56 deletions client/protocols/help.py
Original file line number Diff line number Diff line change
Expand Up @@ -20,13 +20,12 @@

from client.utils.printer import print_error

class HelpProtocol(models.Protocol):
class HelpProtocol(models.ResearchProtocol):

SERVER = 'https://61a-bot-backend.zamfi.net'
HELP_ENDPOINT = SERVER + '/get-help-cli'
HELP_ENDPOINT = models.ResearchProtocol.SERVER + '/get-help-cli'
FEEDBACK_PROBABILITY = 1
FEEDBACK_REQUIRED = False
FEEDBACK_ENDPOINT = SERVER + '/feedback'
FEEDBACK_ENDPOINT = models.ResearchProtocol.SERVER + '/feedback'
FEEDBACK_KEY = 'jfv97pd8ogybhilq3;orfuwyhiulae'
FEEDBACK_MESSAGE = "The hint was... (Press return/enter to skip)\n1) Helpful, all fixed\n2) Helpful, not all fixed\n3) Not helpful, but made sense\n4) Not helpful, didn't make sense\n5) Misleading/Wrong\n"
FEEDBACK_OPTIONS = set([str(i) for i in range(1, 6)])
Expand All @@ -38,12 +37,10 @@ class HelpProtocol(models.Protocol):
}
NO_HELP_TYPE_OPTIONS = {'y'}
DISABLE_HELP_OPTIONS = {"never"}
HELP_KEY = 'jfv97pd8ogybhilq3;orfuwyhiulae'
AG_PREFIX = "————————————————————————\nThe following is an automated report from an autograding tool that may indicate a failed test case or a syntax error. Consider it in your response.\n\n"

GET_CONSENT = True
CONSENT_CACHE = '.ok_consent'
NO_CONSENT_OPTIONS = {"n", "no", "0", "-1", }
CONSENT_MESSAGE = "Can we collect your de-identified data for research directed by Prof. Narges Norouzi (EECS faculty member unaffiliated with this course)? Your consent is voluntary and does not affect your ability to use this tool or your course grade. For more information visit https://cs61a.org/articles/61a-bot\n\nYou can change your response at any time by running `python3 ok --consent`."

CONTEXT_CACHE = '.ok_context'
CONTEXT_LENGTH = 3
DISABLED_CACHE = '.ok_disabled'
Expand All @@ -53,11 +50,6 @@ class HelpProtocol(models.Protocol):
NO_HELP_TYPE_PROMPT = BOT_PREFIX + "Would you like to receive 61A-bot feedback on your code (y/N/never)? "
HELP_TYPE_ENABLED = False
HELP_TYPE_DISABLED_MESSAGE = '<help type disabled>'
CS61A_ENDPOINT = 'cs61a'
C88C_ENDPOINT = 'c88c'
CS61A_ID = '61a'
C88C_ID = '88c'
UNKNOWN_COURSE = '<unknown course>'

def run(self, messages):
config = config_utils._get_config(self.args.config)
Expand All @@ -72,16 +64,8 @@ def run(self, messages):
else:
course_id = self.UNKNOWN_COURSE

tests = self.assignment.specified_tests
grading_analytics = messages.get('grading', {})
failed = False
active_function = tests[-1].name
for test in tests:
name = test.name
if name in grading_analytics and grading_analytics[name]['failed'] > 0:
failed = True
active_function = name
break
check_solved = self._check_solved(messages)
failed, active_function = check_solved['failed'], check_solved['active_function']

get_help = self.args.get_help
help_payload = None
Expand Down Expand Up @@ -111,7 +95,7 @@ def run(self, messages):
'code': code if len(context) == 0 else '',
'codeError': self.AG_PREFIX + autograder_output,
'version': 'v2',
'key': self.HELP_KEY,
'key': self.SERVER_KEY,
'consent': consent,
'messages': context + [curr_message],
'studentQuery': student_query,
Expand All @@ -136,6 +120,7 @@ def animate():
try:
help_response = requests.post(self.HELP_ENDPOINT, json=help_payload).json()
except Exception as e:
# print(requests.post(self.HELP_ENDPOINT, json=help_payload))
print_error("Error generating hint. Please try again later.")
return
if 'output' not in help_response:
Expand Down Expand Up @@ -218,38 +203,6 @@ def _get_binary_feedback(self, req_id):
feedback_response = requests.post(self.FEEDBACK_ENDPOINT, json=feedback_payload).json()
return feedback_response.get('status')

def _mac(self, key, value):
mac = hmac.new(key.encode('utf-8'), digestmod='sha512')
mac.update(repr(value).encode('utf-8'))
return mac.hexdigest()

def _get_consent(self, email):
if self.GET_CONSENT:
if self.CONSENT_CACHE in os.listdir() and not self.args.consent:
try:
with open(self.CONSENT_CACHE, 'rb') as f:
data = pickle.load(f)
if not hmac.compare_digest(data.get('mac'), self._mac(email, data.get('consent'))):
os.remove(self.CONSENT_CACHE)
return self._get_consent(email)
return data.get('consent')
except:
os.remove(self.CONSENT_CACHE)
return self._get_consent(email)
else:
print(self.CONSENT_MESSAGE)
res = input("\n(Y/n)? ").lower()
consent = res not in self.NO_CONSENT_OPTIONS
if consent:
print("\nYou have consented.\n")
else:
print("\nYou have not consented.\n")
with open(self.CONSENT_CACHE, 'wb') as f:
pickle.dump({'consent': consent, 'mac': self._mac(email, consent)}, f, protocol=pickle.HIGHEST_PROTOCOL)
return consent
else:
return False

def _get_context(self, email, full=False):
if self.CONTEXT_CACHE in os.listdir():
try:
Expand Down