-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy path03_bot.py
More file actions
270 lines (217 loc) · 9.57 KB
/
03_bot.py
File metadata and controls
270 lines (217 loc) · 9.57 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
import pyautogui
import time
import pyperclip
import requests
import json
import random
# ===== FREE AI ALTERNATIVES =====
# Option 1: Hugging Face Inference API (Free, requires signup)
def generate_reply_huggingface(text):
    """Generate a reply via the Hugging Face Inference API (DialoGPT-medium).

    Args:
        text: The incoming message text to reply to.

    Returns:
        A short reply string. Never raises: on any network or parsing
        failure it falls back to generate_simple_reply().
    """
    try:
        # Sign up at huggingface.co and get your free token
        # Replace 'your token' with your actual token
        headers = {
            "Authorization": "your token"
        }
        API_URL = "https://api-inference.huggingface.co/models/microsoft/DialoGPT-medium"
        payload = {"inputs": text}
        # timeout= prevents the bot from hanging forever on a stalled
        # connection; a TimeoutError is caught below like any other failure.
        response = requests.post(API_URL, headers=headers, json=payload, timeout=15)
        result = response.json()
        if isinstance(result, list) and len(result) > 0:
            generated_text = result[0].get('generated_text', '')
            # DialoGPT echoes the prompt; strip it so only the reply remains.
            if text in generated_text:
                reply = generated_text.replace(text, '').strip()
                return reply if reply else "That's interesting!"
            return generated_text
        return "Thanks for your message!"
    except Exception as e:
        # Deliberate best-effort: any failure degrades to the rule-based reply.
        print(f"Hugging Face error: {e}")
        return generate_simple_reply(text)
# Option 2: Cohere AI (Free tier - 100 calls/month)
def generate_reply_cohere(text):
    """Generate a reply via the Cohere /v1/generate API (command-light model).

    Args:
        text: The incoming message text to reply to.

    Returns:
        A short reply string. Never raises: on any network or parsing
        failure it falls back to generate_simple_reply().
    """
    try:
        # Sign up at cohere.ai for free API key
        headers = {
            "Authorization": "Bearer YOUR_COHERE_API_KEY",
            "Content-Type": "application/json"
        }
        payload = {
            "model": "command-light",
            "prompt": f"Reply to this message in a friendly way: {text}\nReply:",
            "max_tokens": 50,
            "temperature": 0.7
        }
        # timeout= keeps the bot from hanging on a stalled connection.
        response = requests.post("https://api.cohere.ai/v1/generate",
                                 headers=headers, json=payload, timeout=15)
        result = response.json()
        if 'generations' in result and len(result['generations']) > 0:
            return result['generations'][0]['text'].strip()
        return "Thanks for sharing!"
    except Exception as e:
        # Deliberate best-effort fallback to the rule-based replier.
        print(f"Cohere error: {e}")
        return generate_simple_reply(text)
# Option 3: Google's Gemini API (Free tier available)
def generate_reply_gemini(text):
    """Generate a reply via Google's Gemini generateContent REST endpoint.

    Args:
        text: The incoming message text to reply to.

    Returns:
        A short reply string. Never raises: on any network or parsing
        failure it falls back to generate_simple_reply().
    """
    try:
        # Get free API key from Google AI Studio
        api_key = "YOUR_GEMINI_API_KEY"
        url = f"https://generativelanguage.googleapis.com/v1beta/models/gemini-pro:generateContent?key={api_key}"
        payload = {
            "contents": [{
                "parts": [{
                    "text": f"Reply to this WhatsApp message in a friendly, casual way (keep it short): {text}"
                }]
            }]
        }
        # timeout= keeps the bot from hanging on a stalled connection.
        response = requests.post(url, json=payload, timeout=15)
        result = response.json()
        if 'candidates' in result and len(result['candidates']) > 0:
            content = result['candidates'][0]['content']['parts'][0]['text']
            return content.strip()
        return "Thanks for your message!"
    except Exception as e:
        # Deliberate best-effort fallback to the rule-based replier.
        print(f"Gemini error: {e}")
        return generate_simple_reply(text)
# Option 4: Ollama (Run AI models locally - FREE!)
def generate_reply_ollama(text):
    """Generate a reply via a locally-running Ollama server (llama2 model).

    Args:
        text: The incoming message text to reply to.

    Returns:
        A short reply string. Never raises: on any network or parsing
        failure it falls back to generate_simple_reply().
    """
    try:
        # Install Ollama from ollama.ai, then run: ollama pull llama2
        url = "http://localhost:11434/api/generate"
        payload = {
            "model": "llama2",
            "prompt": f"Reply to this message in a friendly way: {text}",
            "stream": False
        }
        # Local model inference can be slow; a generous timeout still
        # prevents an indefinite hang if the server stalls.
        response = requests.post(url, json=payload, timeout=60)
        result = response.json()
        if 'response' in result:
            return result['response'].strip()
        return "Thanks for your message!"
    except Exception as e:
        # Deliberate best-effort fallback to the rule-based replier.
        print(f"Ollama error: {e}")
        return generate_simple_reply(text)
# Option 5: OpenRouter (Free tier with multiple models)
def generate_reply_openrouter(text):
    """Generate a reply via the OpenRouter chat-completions API.

    Args:
        text: The incoming message text to reply to.

    Returns:
        A short reply string. Never raises: on any network or parsing
        failure it falls back to generate_simple_reply().
    """
    try:
        # Sign up at openrouter.ai for free credits
        headers = {
            "Authorization": "Bearer sk-or-YOUR_OPENROUTER_KEY",
            "Content-Type": "application/json"
        }
        payload = {
            "model": "openchat/openchat-7b:free",  # Free model
            "messages": [
                {"role": "user", "content": f"Reply to this message in a friendly way: {text}"}
            ],
            "max_tokens": 50
        }
        # timeout= keeps the bot from hanging on a stalled connection.
        response = requests.post("https://openrouter.ai/api/v1/chat/completions",
                                 headers=headers, json=payload, timeout=15)
        result = response.json()
        if 'choices' in result and len(result['choices']) > 0:
            return result['choices'][0]['message']['content'].strip()
        return "Thanks for sharing!"
    except Exception as e:
        # Deliberate best-effort fallback to the rule-based replier.
        print(f"OpenRouter error: {e}")
        return generate_simple_reply(text)
# Option 6: Smart Rule-Based Responses (No API needed - 100% FREE!)
def generate_simple_reply(text):
    """Rule-based replier: keyword lookup, then heuristics, then a default.

    Matching is case-insensitive substring search, Urdu/Hindi table first,
    then English. If no keyword hits, a question mark or a long message
    selects a themed pool; otherwise a generic default pool is used.

    Args:
        text: The incoming message text to reply to.

    Returns:
        A randomly chosen canned reply string.
    """
    lowered = text.lower()

    # Urdu/Hindi common words
    urdu_table = {
        'salam': ['Walaikum Salam! Kya hal hai?', 'Salam! Kaise ho?'],
        'hello': ['Hello! Kaise ho?', 'Hi! Kya kar rahe ho?'],
        'kaise ho': ['Main theek hun, tum kaise ho?', 'Alhamdulillah theek hun!'],
        'kya kar rahe': ['Kuch khas nahi, tum batao!', 'Bas yahan baithe hain!'],
        'thanks': ['Koi baat nahi!', 'Welcome!', 'Mention not!'],
        'thank you': ['Koi baat nahi!', 'Welcome!', 'Khushi hui madad karke!'],
        'good morning': ['Good morning! Din acha guzre!', 'Subah bakhair!'],
        'good night': ['Good night! Acha sapne dekho!', 'Shab bakhair!'],
        'ok': ['Theek hai!', 'Bilkul!', 'Han bhai!'],
        'han': ['Acha!', 'Samajh gaya!', 'Theek hai!'],
        'nahi': ['Koi baat nahi!', 'Theek hai!', 'Samajh gaya!'],
        'bye': ['Allah hafiz!', 'Bye! Take care!', 'Phir milenge!']
    }

    # English responses
    english_table = {
        'hello': ['Hello! How are you?', 'Hi there!', 'Hey! What\'s up?'],
        'hi': ['Hello! How are you?', 'Hi there!', 'Hey! What\'s up?'],
        'how are you': ['I\'m doing well, thanks! How about you?', 'Great! How are you?'],
        'thanks': ['You\'re welcome!', 'No problem!', 'Happy to help!'],
        'good morning': ['Good morning! Have a great day!', 'Morning!'],
        'good night': ['Good night! Sleep well!', 'Sweet dreams!'],
        'yes': ['Great!', 'Awesome!', 'Perfect!'],
        'no': ['No worries!', 'That\'s okay!', 'I understand!'],
        'ok': ['Perfect!', 'Great!', 'Sounds good!'],
        'bye': ['Goodbye!', 'See you later!', 'Take care!']
    }

    # Scan Urdu first, then English, preserving each table's insertion order.
    for table in (urdu_table, english_table):
        for keyword, options in table.items():
            if keyword in lowered:
                return random.choice(options)

    # No keyword matched: a '?' suggests the message was a question.
    if '?' in text:
        return random.choice([
            'Interesting question!', 'Let me think about that!',
            'Good point!', 'Hmm, that\'s worth considering!'
        ])

    # Long messages get an acknowledgement of the detail.
    if len(text) > 100:
        return random.choice([
            'Thanks for sharing so much detail!', 'I appreciate you explaining that!',
            'That\'s quite a lot to think about!', 'Thanks for the detailed message!'
        ])

    # Generic fallback pool.
    return random.choice([
        'That\'s interesting!', 'Tell me more!', 'I see!', 'Really?',
        'That sounds good!', 'Acha!', 'Interesting!', 'Got it!',
        'Thanks for sharing!', 'Hmm, interesting point!', 'I understand!'
    ])
# ===== MAIN WHATSAPP AUTOMATION =====
# Flow: focus WhatsApp Web in the browser, drag-select the visible chat,
# copy it, generate a reply, paste it into the input box, and press Enter.
# WARNING: all pyautogui coordinates below are hard-coded for one specific
# screen resolution and window layout — they must be re-measured on any
# other machine. The time.sleep() calls give the UI time to react; the
# sequence is timing-sensitive, so do not move the mouse while it runs.
# Short delay to switch to Chrome window
time.sleep(2)
# Step 1: Click on WhatsApp Web icon
pyautogui.click(620, 242)
time.sleep(1)
# Step 2: Drag to select the text (top-left to bottom-right of chat area)
pyautogui.moveTo(748, 222)
pyautogui.dragTo(1670, 924, duration=1, button="left")
# Step 3: Copy selected text
pyautogui.hotkey('ctrl', 'c')
time.sleep(1)
# Click elsewhere to deselect
pyautogui.click(1594, 233)
# Step 4: Get text from clipboard
chat_history = pyperclip.paste()
print("Copied WhatsApp Chat:\n", chat_history)
# Step 5: Generate reply - CHOOSE YOUR METHOD:
# Method 1: Smart rule-based (Recommended - Always works!)
response = generate_simple_reply(chat_history.strip())
# Method 2: Hugging Face (Uncomment and add your token)
# response = generate_reply_huggingface(chat_history.strip())
# Method 3: Cohere AI (Uncomment and add your API key)
# response = generate_reply_cohere(chat_history.strip())
# Method 4: Google Gemini (Uncomment and add your API key)
# response = generate_reply_gemini(chat_history.strip())
# Method 5: Ollama - Local AI (Uncomment if you have Ollama installed)
# response = generate_reply_ollama(chat_history.strip())
# Method 6: OpenRouter (Uncomment and add your API key)
# response = generate_reply_openrouter(chat_history.strip())
print(f"Generated Response: {response}")
# Step 6: Copy reply to clipboard (pasting avoids per-keystroke typing issues)
pyperclip.copy(response)
# Step 7: Click on input box
pyautogui.click(1164, 974)
time.sleep(1)
# Step 8: Paste the reply
pyautogui.hotkey('ctrl', 'v')
time.sleep(1)
# Step 9: Send message
pyautogui.press('enter')