forked from P-r-e-m-i-u-m/mee-you-want-to-see
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathjob_agents.py
More file actions
296 lines (250 loc) · 11.4 KB
/
job_agents.py
File metadata and controls
296 lines (250 loc) · 11.4 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
import os
import logging
import asyncio
from agents import (
Agent,
OpenAIChatCompletionsModel,
Runner,
set_tracing_disabled,
)
from agents.mcp import MCPServer
from openai import AsyncOpenAI
logger = logging.getLogger(__name__)
# Chat model used by every agent in the pipeline unless the caller overrides it.
_DEFAULT_MODEL = "meta-llama/Llama-3.3-70B-Instruct"


def _chat_model(client: AsyncOpenAI, model_name: str) -> OpenAIChatCompletionsModel:
    """Return a chat-completions model bound to *client* for the given model name."""
    return OpenAIChatCompletionsModel(model=model_name, openai_client=client)


async def run_analysis(
    mcp_server: MCPServer,
    linkedin_url: str,
    model_name: str = _DEFAULT_MODEL,
):
    """Run the full career-analysis agent pipeline for a LinkedIn profile.

    Pipeline stages (each stage's output feeds the next):
      1. LinkedIn profile analysis (uses the MCP server to fetch the profile)
      2. Domain classification (job suggestions)
      3. Y Combinator job-board URL generation
      4. Job listing extraction (uses the MCP server to fetch listings)
      5. Apply-URL normalization
      6. Final markdown summary report

    Args:
        mcp_server: MCP server providing the fetch tools for stages 1 and 4.
        linkedin_url: Public LinkedIn profile URL to analyze.
        model_name: Chat model identifier; defaults to ``_DEFAULT_MODEL``.

    Returns:
        The final markdown career-analysis report (stage 6 output).

    Raises:
        KeyError: if the ``NEBIUS_API_KEY`` environment variable is unset.
        Exception: any error raised by an agent run is logged and re-raised.
    """
    logger.info("Starting analysis for LinkedIn URL: %s", linkedin_url)
    # Fail fast with a KeyError if the credential is missing.
    api_key = os.environ["NEBIUS_API_KEY"]
    base_url = "https://api.tokenfactory.nebius.com/v1"
    client = AsyncOpenAI(base_url=base_url, api_key=api_key)
    set_tracing_disabled(disabled=True)

    linkedin_agent = Agent(
        name="LinkedIn Profile Analyzer",
        instructions="""You are a LinkedIn profile analyzer.
Analyze profiles for:
- Professional experience and career progression
- Education and certifications
- Core skills and expertise
- Current role and company
- Previous roles and achievements
- Industry reputation (recommendations/endorsements)
Provide a structured analysis with bullet points and a brief executive summary.
NOTE: If the user has no experience, just say "No experience found" and don't make up any information. Also if any of the information is not available, just say "Not available" and don't make up any information.
DISCLAIMER: This Agent should call the tool to get the information. Once the tool is called, it will return the information in the response. It should not call the tool Multiple times after the tool is called.
""",
        mcp_servers=[mcp_server],
        model=_chat_model(client, model_name),
    )

    job_suggestions_agent = Agent(
        name="Job Suggestions",
        instructions="""You are a domain classifier that identifies the primary professional domain from a LinkedIn profile.
Select ONE domain from:
- Software Engineering (for programming, development, technical skills)
- Design & UI/UX (for design, user experience, visual skills)
- Product Management (for product strategy, roadmap, feature planning)
- Recruiting & HR (for talent acquisition, HR operations, people management)
- Sales (for sales, business development)
- Science (for research, data science, scientific expertise)
- Marketing (for Content Writing, Technical Writing, Developer Relations,Developer Advocacy, advertising, brand management)
Rules:
- Choose based on PRIMARY skills and experience
- If multiple domains exist, pick the most recent/relevant one
- Default to Software Engineering if unclear
- Return "No experience found" for empty profiles
- Never make up or assume skills
Format response as JSON:
{
"selected_domain": "chosen domain",
"confidence_score": 0-100,
"selection_reason": "brief explanation"
}
""",
        model=_chat_model(client, model_name),
    )

    url_generator_agent = Agent(
        name="URL Generator",
        instructions="""You are a URL generator that creates Y Combinator job board URLs based on domains.
Input: JSON from job suggestions agent with format:
{
"selected_domain": "domain name",
"confidence_score": number,
"selection_reason": "reason"
}
Map domains to URLs:
- "Software Engineering" -> "ycombinator.com/jobs/role/software-engineer"
- "Design & UI/UX" -> "ycombinator.com/jobs/role/designer"
- "Product Management" -> "ycombinator.com/jobs/role/product-manager"
- "Recruiting & HR" -> "ycombinator.com/jobs/role/recruiting-hr"
- "Sales" -> "ycombinator.com/jobs/role/sales-manager"
- "Science" -> "ycombinator.com/jobs/role/science"
- "Marketing" -> "ycombinator.com/jobs/role/marketing"
Output format:
{
"job_board_url": "mapped url",
"domain": "original domain"
}
Rules:
- Return exact URL match for domain
- If domain not found, return "ycombinator.com/jobs"
- Keep original domain in output
""",
        model=_chat_model(client, model_name),
    )

    job_search_agent = Agent(
        name="Job Finder",
        instructions="""You are a job finder that extracts job listings from Y Combinator's job board.
Steps:
1. Take the URL from job_link_result agent's JSON response
2. Use the provided URL to fetch job listings ONCE.
3. For each job listing, extract ONLY these fields:
- Company name (from the company link text)
- Job title (from the job link text)
- Job type (Full-time/Part-time/Contract)
- Location (including remote status)
- Apply URL (from the Apply button href)
4. Format output as:
## Job Matches for [Domain]
### [Job Title]
- **Company:** [Company Name]
- **Type:** [Job Type]
- **Location:** [Location]
- **Apply:** [Apply URL]
Rules:
- Only extract information from the first 5 most relevant job listings
- Skip navigation links, footer content, and other non-job elements
- Use exact text from the job listing
- Return "No jobs found" if no listings are available
- Ignore job categories and location filters
- Do not include any additional information not present in the job listing
- IMPORTANT: Call the tool EXACTLY ONCE to fetch the job listings
Note: No information should be added to the response that is not provided in the input. Don't make up any information.
""",
        mcp_servers=[mcp_server],
        model=_chat_model(client, model_name),
    )

    url_parser_agent = Agent(
        name="URL Parser",
        instructions="""You are a URL parser that transforms Y Combinator authentication URLs into direct job URLs.
Input: Job listings with authentication URLs in format:
## Job Matches for [Domain]
### [Job Title]
- **Company:** [Company Name]
- **Type:** [Job Type]
- **Location:** [Location]
- **Apply:** [Auth URL]
Rules:
1. Extract job_id from the authentication URL
- Look for 'signup_job_id=' parameter
- Example: from '...signup_job_id=75187...' extract '75187'
2. Create new direct URL format:
- Base URL: 'https://www.workatastartup.com/jobs/'
- Append job_id
- Example: 'https://www.workatastartup.com/jobs/75187'
3. Replace the Apply URL in each job listing with the new direct URL
Output format:
## Job Matches for [Domain]
### [Job Title]
- **Company:** [Company Name]
- **Type:** [Job Type]
- **Location:** [Location]
- **Apply:** [Direct URL]
Note: Keep all other information exactly the same, only transform the Apply URLs.
""",
        model=_chat_model(client, model_name),
    )

    summary_agent = Agent(
        name="Summary Agent",
        instructions="""You are a summary agent that creates comprehensive career analysis reports.
Your task is to:
1. Take the inputs from various agents (LinkedIn analysis, job suggestions, and job matches)
2. Create a well-structured, professional summary in markdown format that includes:
- A concise profile summary
- Top skills identified
- Recommended career paths
- Detailed role suggestions with reasons and requirements
- Current job matches with match scores
- Skills to develop
- Career development suggestions
Format your response in markdown with the following structure:
```markdown
## 👤 Profile Summary
[Write a brief summary of the person's career profile]
## 🎯 Your Top Skills:
- [Skill 1]
- [Skill 2]
...
## 💡 Suggested Roles:
### [Role Title]
- **Why this role?** [Explanation]
- **Required Skills:** [Skill 1, Skill 2, ...]
- **Potential Companies:** [Company 1, Company 2, ...]
- **Growth Potential:** [Growth opportunities]
- **Salary Range:** [Salary range if available]
## 💼 Current Job Matches:
### [Job Title] at [Company]
- [Brief description]
- Match Score: [Score]%
- [Apply Here]([Job URL])
...
```
Note: No information should be added to the response that is not provided in the input. Don't make up any information.
Ensure your response is well-formatted markdown that can be directly displayed.""",
        model=_chat_model(client, model_name),
    )

    query = f"""Analyze the LinkedIn profile at {linkedin_url}.
Focus on gathering comprehensive information about the person's professional background.
Then, find the best job for the user based on their profile.
"""

    try:
        # Stage 1: fetch and analyze the LinkedIn profile via the MCP tool.
        logger.info("Running LinkedIn profile analysis")
        linkedin_result = await Runner.run(starting_agent=linkedin_agent, input=query)
        logger.info("LinkedIn profile analysis completed")

        # Stage 2: classify the profile into a single professional domain.
        logger.info("Getting job suggestions")
        suggestions_result = await Runner.run(
            starting_agent=job_suggestions_agent, input=linkedin_result.final_output
        )
        logger.info("Job suggestions completed")

        # Stage 3: map the domain to a YC job-board URL.
        logger.info("Getting job link")
        job_link_result = await Runner.run(
            starting_agent=url_generator_agent, input=suggestions_result.final_output
        )
        logger.info("Job link generation completed")

        # Stage 4: scrape job listings from the generated URL via the MCP tool.
        logger.info("Getting job matches")
        job_search_result = await Runner.run(
            starting_agent=job_search_agent, input=job_link_result.final_output
        )
        logger.info("Job search completed")

        # Stage 5: rewrite signup/auth Apply URLs into direct job URLs.
        logger.info("Parsing job URLs")
        parsed_urls_result = await Runner.run(
            starting_agent=url_parser_agent, input=job_search_result.final_output
        )
        logger.info("URL parsing completed")

        # Stage 6: combine the intermediate outputs into one summary prompt
        # so the report is produced in a single model call.
        logger.info("Generating final summary")
        summary_input = f"""LinkedIn Profile Analysis:
{linkedin_result.final_output}
Job Suggestions:
{suggestions_result.final_output}
Job Matches:
{parsed_urls_result.final_output}
Please analyze the above information and create a comprehensive career analysis report in markdown format."""
        summary_result = await Runner.run(starting_agent=summary_agent, input=summary_input)
        logger.info("Summary generation completed")
        return summary_result.final_output
    except Exception:
        # logger.exception records the full traceback; bare raise preserves it.
        logger.exception("Error during analysis")
        raise