Assign Contributor Labels

L3
Model Context Protocol · Github · Missing Semester

Assign labels to open issues and PRs based on contributors mentioned in comments or the most frequent contributor from past 100 commits, using assigned-username format.

Created by Zijian Wu
2025-08-15
Issue Management · Label Automation · Contributor Analysis

Model Ranking

Click on the dots to view the trajectory of each task run
Model
Run Results
Pass@4
Pass^4
Avg Time
Avg Turns
Input Tokens
Output Tokens
Total Tokens
Claude
claude-opus-4-1
0
/1
--
110.9s
3.0
370,537
829
371,366
Claude
claude-opus-4-5-high
0
/4
100.8s
4.5
285,724
3,658
289,382
Claude
claude-sonnet-4
0
/4
57.9s
3.0
367,281
527
367,808
Claude
claude-sonnet-4-5
0
/4
89.8s
5.5
439,344
3,326
442,670
Claude
claude-sonnet-4-high
0
/4
178.2s
19.0
1,503,600
3,219
1,506,818
Claude
claude-sonnet-4-low
0
/4
213.6s
19.3
1,528,821
3,268
1,532,089
DeepSeek
deepseek-chat
0
/4
18.2s
1.0
14,284
114
14,398
DeepSeek
deepseek-v3-1-terminus
0
/4
253.0s
9.8
673,328
1,335
674,662
DeepSeek
deepseek-v3-1-terminus-thinking
0
/4
1781.1s
10.0
679,718
36,512
716,230
DeepSeek
deepseek-v3-2-chat
0
/4
261.3s
22.8
1,438,460
3,791
1,442,252
DeepSeek
deepseek-v3-2-thinking
0
/4
385.1s
26.3
1,766,107
7,477
1,773,584
Gemini
gemini-2-5-flash
0
/4
190.4s
3.0
428,890
36,951
465,841
Gemini
gemini-2-5-pro
0
/4
52.9s
2.0
185,804
4,064
189,868
Gemini
gemini-3-pro-high
0
/4
154.1s
12.3
792,218
6,898
799,116
Gemini
gemini-3-pro-low
0
/4
170.7s
17.3
1,250,586
6,868
1,257,454
Z.ai
glm-4-5
0
/4
33.1s
3.0
125,983
713
126,696
OpenAI
gpt-4-1
0
/4
49.4s
5.0
224,850
894
225,743
OpenAI
gpt-4-1-mini
0
/4
92.2s
7.8
461,836
1,788
463,625
OpenAI
gpt-4-1-nano
0
/4
81.8s
16.8
1,247,616
1,237
1,248,853
OpenAI
gpt-5-high
0
/4
857.4s
8.3
428,704
32,272
460,975
OpenAI
gpt-5-low
0
/4
453.9s
13.0
1,936,923
15,657
1,952,580
OpenAI
gpt-5-medium
0
/4
402.5s
8.3
459,460
19,426
478,885
OpenAI
gpt-5-mini-high
0
/4
701.9s
19.0
2,050,138
56,996
2,107,134
OpenAI
gpt-5-mini-low
0
/4
125.9s
13.0
1,843,257
1,817
1,845,074
OpenAI
gpt-5-mini-medium
0
/4
181.9s
16.8
932,611
13,392
946,003
OpenAI
gpt-5-nano-high
0
/4
225.5s
5.5
266,132
42,771
308,902
OpenAI
gpt-5-nano-low
0
/4
33.9s
3.0
107,074
2,504
109,578
OpenAI
gpt-5-nano-medium
0
/4
154.4s
6.3
356,577
23,022
379,599
OpenAI
gpt-oss-120b
0
/4
21.0s
3.8
118,676
912
119,588
Grok
grok-4
0
/4
440.7s
9.5
987,075
6,879
1,000,609
Grok
grok-4-fast
0
/4
59.5s
6.0
180,397
4,689
185,086
Grok
grok-code-fast-1
0
/4
73.3s
19.8
1,126,279
4,690
1,130,968
MoonshotAI
kimi-k2-0711
0
/4
125.9s
3.0
273,629
154
273,783
MoonshotAI
kimi-k2-0905
0
/4
499.1s
19.3
1,098,003
1,745
1,099,748
OpenAI
o3
0
/4
105.7s
7.5
961,841
1,558
963,398
OpenAI
o4-mini
0
/4
263.8s
6.8
853,693
5,501
859,194
Qwen
qwen-3-coder-plus
0
/4
172.8s
13.8
1,805,619
1,409
1,807,028
Qwen
qwen-3-max
0
/4
133.8s
18.0
1,237,112
1,048
1,238,160

Task State


Instruction

Assign assignees for each open issue and open PR by adding labels instead of using direct assignees. Only contributors who appeared in the past 100 commits are considered. First, collect all such contributors and identify the most frequent author among them. For each open issue or PR, assign using labels according to the following rules: • If the comments mention an author with @username, add a label in the format assigned-username. • If multiple authors are mentioned, add labels in the same format for all of them. • If no authors are mentioned in the comments, add a label for the most frequent contributor from the past 100 commits, using the format assigned-username.



Verify

*.py
Python
import sys
import os
import requests
from typing import Dict, Optional, Tuple, List
from dotenv import load_dotenv


def _get_github_api(
    endpoint: str,
    headers: Dict[str, str],
    org: str,
    repo: str = "missing-semester",
    timeout: float = 30.0,
) -> Tuple[bool, Optional[Dict]]:
    """Make a GET request to the GitHub REST API for a repo endpoint.

    Args:
        endpoint: Path below ``/repos/{org}/{repo}/`` (e.g. ``"issues/9"``).
        headers: HTTP headers, including the Authorization bearer token.
        org: GitHub organization/owner name.
        repo: Repository name (defaults to ``"missing-semester"``).
        timeout: Seconds to wait for the request before aborting; without
            this, a stalled connection would hang the verifier indefinitely.

    Returns:
        ``(True, parsed_json)`` on HTTP 200; ``(False, None)`` on 404, any
        other non-200 status (logged to stderr), or a network exception.
    """
    url = f"https://api.github.com/repos/{org}/{repo}/{endpoint}"

    try:
        # timeout prevents an unbounded hang on a dead connection.
        response = requests.get(url, headers=headers, timeout=timeout)
        if response.status_code == 200:
            return True, response.json()
        elif response.status_code == 404:
            # Missing resource is an expected "not found", not an error worth logging.
            return False, None
        else:
            print(f"API error for {endpoint}: {response.status_code}", file=sys.stderr)
            return False, None
    except Exception as e:
        # Network failures and JSON decode errors are reported, never raised,
        # so one flaky call cannot crash the whole verification run.
        print(f"Exception for {endpoint}: {e}", file=sys.stderr)
        return False, None


def _get_issue_labels(
    issue_number: int,
    headers: Dict[str, str],
    org: str,
    repo: str = "missing-semester"
) -> Optional[List[str]]:
    """Return the label names attached to an issue or PR, or None on failure.

    GitHub's issues endpoint also serves pull requests, so a PR number
    works here too.
    """
    ok, payload = _get_github_api(f"issues/{issue_number}", headers, org, repo)
    if not ok or not payload:
        return None

    # Each label object carries metadata (color, id, ...); only the name matters.
    return [entry["name"] for entry in payload.get("labels", [])]


def verify() -> bool:
    """
    Programmatically verify that the labels were assigned correctly to issues and PRs.
    """
    # Credentials come from the local .mcp_env file.
    load_dotenv(".mcp_env")

    token = os.environ.get("MCP_GITHUB_TOKEN")
    org = os.environ.get("GITHUB_EVAL_ORG")

    # Fail fast, with a distinct message per missing variable.
    if not token:
        print("Error: MCP_GITHUB_TOKEN environment variable not set", file=sys.stderr)
        return False
    if not org:
        print("Error: GITHUB_EVAL_ORG environment variable not set", file=sys.stderr)
        return False

    headers = {
        "Authorization": f"Bearer {token}",
        "Accept": "application/vnd.github.v3+json",
    }

    print("Verifying contributor labels assignment task completion...")

    # Item number -> labels that must be present (exactly, order-insensitive).
    expected_labels = {
        # Issues
        9: ["assigned-jonhoo", "assigned-anishathalye"],  # Issue #9
        14: ["assigned-jonhoo", "assigned-anishathalye"],  # Issue #14
        15: ["assigned-anishathalye"],  # Issue #15
        # PRs
        21: ["assigned-anishathalye"],  # PR #21
        22: ["assigned-anishathalye"],  # PR #22
        23: ["assigned-anishathalye"],  # PR #23
        24: ["assigned-anishathalye"],  # PR #24
    }

    issue_numbers = {9, 14, 15}  # everything else in expected_labels is a PR
    all_passed = True

    for number, wanted in expected_labels.items():
        kind = "Issue" if number in issue_numbers else "PR"
        print(f"\nChecking {kind} #{number}...")

        found = _get_issue_labels(number, headers, org, "missing-semester")

        if found is None:
            print(f"  ❌ Failed to retrieve {kind} #{number}", file=sys.stderr)
            all_passed = False
            continue

        # Compare as sorted lists so label ordering is irrelevant.
        found_sorted, wanted_sorted = sorted(found), sorted(wanted)

        if found_sorted == wanted_sorted:
            print(f"  ✅ {kind} #{number} has correct labels: {found_sorted}")
        else:
            print(f"  ❌ {kind} #{number} has incorrect labels", file=sys.stderr)
            print(f"     Expected: {wanted_sorted}", file=sys.stderr)
            print(f"     Found: {found_sorted}", file=sys.stderr)
            all_passed = False

    if all_passed:
        print("\n✅ All verification checks passed!")
        print("Contributor labels assignment task completed successfully:")
        print("  - Issues #9 and #14 have both 'assigned-jonhoo' and 'assigned-anishathalye' labels")
        print("  - Issue #15 and all 4 open PRs have 'assigned-anishathalye' label")
    else:
        print("\n❌ Some verification checks failed", file=sys.stderr)

    return all_passed


if __name__ == "__main__":
    # Exit code 0 on success, 1 on any failed check.
    sys.exit(0 if verify() else 1)