Skip to content

Commit 20d8a52

Browse files
committed
chore: deprecate Claude 3.5 Sonnet for TextGenerator
1 parent 61a9484 commit 20d8a52

File tree

2 files changed

+10
-36
lines changed

2 files changed

+10
-36
lines changed

bigframes/ml/llm.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -877,7 +877,7 @@ class Claude3TextGenerator(base.RetriableRemotePredictor):
877877
The model for natural language tasks. Possible values are "claude-3-sonnet", "claude-3-haiku", "claude-3-5-sonnet" and "claude-3-opus".
878878
"claude-3-sonnet" (deprecated) is Anthropic's dependable combination of skills and speed. It is engineered to be dependable for scaled AI deployments across a variety of use cases.
879879
"claude-3-haiku" is Anthropic's fastest, most compact vision and text model for near-instant responses to simple queries, meant for seamless AI experiences mimicking human interactions.
880-
"claude-3-5-sonnet" is Anthropic's most powerful AI model and maintains the speed and cost of Claude 3 Sonnet, which is a mid-tier model.
880+
"claude-3-5-sonnet" (deprecated) is Anthropic's most powerful AI model and maintains the speed and cost of Claude 3 Sonnet, which is a mid-tier model.
881881
"claude-3-opus" (deprecated) is Anthropic's second-most powerful AI model, with strong performance on highly complex tasks.
882882
https://cloud.google.com/vertex-ai/generative-ai/docs/partner-models/use-claude#available-claude-models
883883
If no setting is provided, "claude-3-sonnet" will be used by default

tests/system/load/test_llm.py

Lines changed: 9 additions & 35 deletions
Original file line numberDiff line numberDiff line change
@@ -98,18 +98,10 @@ def test_llm_gemini_w_ground_with_google_search(llm_remote_text_df):
9898

9999

100100
# (b/366290533): Claude models are of extremely low capacity. The tests should reside in small tests. Moving these here just to protect BQML's shared capacity(as load test only runs once per day.) and make sure we still have minimum coverage.
101-
@pytest.mark.parametrize(
102-
"model_name",
103-
("claude-3-haiku", "claude-3-5-sonnet"),
104-
)
105101
@pytest.mark.flaky(retries=3, delay=120)
106-
def test_claude3_text_generator_create_load(
107-
dataset_id, model_name, session, session_us_east5, bq_connection
108-
):
109-
if model_name in ("claude-3-5-sonnet",):
110-
session = session_us_east5
102+
def test_claude3_text_generator_create_load(dataset_id, session, bq_connection):
111103
claude3_text_generator_model = llm.Claude3TextGenerator(
112-
model_name=model_name, connection_name=bq_connection, session=session
104+
model_name="claude-3-haiku", connection_name=bq_connection, session=session
113105
)
114106
assert claude3_text_generator_model is not None
115107
assert claude3_text_generator_model._bqml_model is not None
@@ -120,40 +112,28 @@ def test_claude3_text_generator_create_load(
120112
)
121113
assert f"{dataset_id}.temp_text_model" == reloaded_model._bqml_model.model_name
122114
assert reloaded_model.connection_name == bq_connection
123-
assert reloaded_model.model_name == model_name
115+
assert reloaded_model.model_name == "claude-3-haiku"
124116

125117

126-
@pytest.mark.parametrize(
127-
"model_name",
128-
("claude-3-haiku", "claude-3-5-sonnet"),
129-
)
130118
@pytest.mark.flaky(retries=3, delay=120)
131119
def test_claude3_text_generator_predict_default_params_success(
132-
llm_text_df, model_name, session, session_us_east5, bq_connection
120+
llm_text_df, session, bq_connection
133121
):
134-
if model_name in ("claude-3-5-sonnet",):
135-
session = session_us_east5
136122
claude3_text_generator_model = llm.Claude3TextGenerator(
137-
model_name=model_name, connection_name=bq_connection, session=session
123+
model_name="claude-3-haiku", connection_name=bq_connection, session=session
138124
)
139125
df = claude3_text_generator_model.predict(llm_text_df).to_pandas()
140126
utils.check_pandas_df_schema_and_index(
141127
df, columns=utils.ML_GENERATE_TEXT_OUTPUT, index=3, col_exact=False
142128
)
143129

144130

145-
@pytest.mark.parametrize(
146-
"model_name",
147-
("claude-3-haiku", "claude-3-5-sonnet"),
148-
)
149131
@pytest.mark.flaky(retries=3, delay=120)
150132
def test_claude3_text_generator_predict_with_params_success(
151-
llm_text_df, model_name, session, session_us_east5, bq_connection
133+
llm_text_df, session, bq_connection
152134
):
153-
if model_name in ("claude-3-5-sonnet",):
154-
session = session_us_east5
155135
claude3_text_generator_model = llm.Claude3TextGenerator(
156-
model_name=model_name, connection_name=bq_connection, session=session
136+
model_name="claude-3-haiku", connection_name=bq_connection, session=session
157137
)
158138
df = claude3_text_generator_model.predict(
159139
llm_text_df, max_output_tokens=100, top_k=20, top_p=0.5
@@ -163,20 +143,14 @@ def test_claude3_text_generator_predict_with_params_success(
163143
)
164144

165145

166-
@pytest.mark.parametrize(
167-
"model_name",
168-
("claude-3-haiku", "claude-3-5-sonnet"),
169-
)
170146
@pytest.mark.flaky(retries=3, delay=120)
171147
def test_claude3_text_generator_predict_multi_col_success(
172-
llm_text_df, model_name, session, session_us_east5, bq_connection
148+
llm_text_df, session, bq_connection
173149
):
174-
if model_name in ("claude-3-5-sonnet",):
175-
session = session_us_east5
176150

177151
llm_text_df["additional_col"] = 1
178152
claude3_text_generator_model = llm.Claude3TextGenerator(
179-
model_name=model_name, connection_name=bq_connection, session=session
153+
model_name="claude-3-haiku", connection_name=bq_connection, session=session
180154
)
181155
df = claude3_text_generator_model.predict(llm_text_df).to_pandas()
182156
utils.check_pandas_df_schema_and_index(

0 commit comments

Comments (0)