|
14 | 14 |
|
15 | 15 | from __future__ import annotations |
16 | 16 |
|
17 | | -from typing import cast, Mapping, Optional, Union |
| 17 | +from typing import Any, cast, List, Mapping, Optional, Union |
18 | 18 |
|
19 | 19 | import bigframes_vendored.constants |
20 | 20 | import google.cloud.bigquery |
@@ -431,3 +431,102 @@ def transform( |
431 | 431 | return bpd.read_gbq_query(sql) |
432 | 432 | else: |
433 | 433 | return session.read_gbq_query(sql) |
| 434 | + |
| 435 | + |
| 436 | +@log_adapter.method_logger(custom_base_name="bigquery_ml") |
| 437 | +def generate_text( |
| 438 | + model: Union[bigframes.ml.base.BaseEstimator, str, pd.Series], |
| 439 | + input_: Union[pd.DataFrame, dataframe.DataFrame, str], |
| 440 | + *, |
| 441 | + temperature: Optional[float] = None, |
| 442 | + max_output_tokens: Optional[int] = None, |
| 443 | + top_k: Optional[int] = None, |
| 444 | + top_p: Optional[float] = None, |
| 445 | + flatten_json_output: Optional[bool] = None, |
| 446 | + safety_settings: Optional[Mapping[str, str]] = None, |
| 447 | + stop_sequences: Optional[List[str]] = None, |
| 448 | + ground_with_google_search: Optional[bool] = None, |
| 449 | + model_params: Optional[Mapping[str, Any]] = None, |
| 450 | + request_type: Optional[str] = None, |
| 451 | +) -> dataframe.DataFrame: |
| 452 | + """ |
| 453 | + Generates text using a BigQuery ML model. |
| 454 | +
|
| 455 | + See the `BigQuery ML GENERATE_TEXT function syntax |
| 456 | + <https://docs.cloud.google.com/bigquery/docs/reference/standard-sql/bigqueryml-syntax-generate-text>`_ |
| 457 | + for additional reference. |
| 458 | +
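| | +    **Examples:** |
| | + |
| | +    A minimal sketch of a call. The model name and prompt column below are |
| | +    illustrative placeholders, so the ``generate_text`` call is skipped |
| | +    rather than executed: |
| | + |
| | +        >>> import bigframes.pandas as bpd |
| | +        >>> bpd.options.display.progress_bar = None |
| | +        >>> df = bpd.DataFrame({"prompt": ["What is BigQuery?"]}) |
| | +        >>> generate_text(  # doctest: +SKIP |
| | +        ...     "my_project.my_dataset.my_model", |
| | +        ...     df, |
| | +        ...     temperature=0.2, |
| | +        ...     max_output_tokens=128, |
| | +        ... ) |
| | + |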
|
| 459 | + Args: |
| 460 | +        model (bigframes.ml.base.BaseEstimator, str, or pandas.Series): |
| 461 | +            The model to use for text generation. |
| 462 | +        input_ (Union[pandas.DataFrame, bigframes.pandas.DataFrame, str]): |
| 463 | +            The DataFrame or SQL query to use for text generation. |
| 464 | + temperature (float, optional): |
| 465 | +            A FLOAT64 value that is used for sampling. The value |
| 466 | + must be in the range ``[0.0, 1.0]``. A lower temperature works well |
| 467 | + for prompts that expect a more deterministic and less open-ended |
| 468 | + or creative response, while a higher temperature can lead to more |
| 469 | + diverse or creative results. A temperature of ``0`` is |
| 470 | + deterministic, meaning that the highest probability response is |
| 471 | + always selected. |
| 472 | + max_output_tokens (int, optional): |
| 473 | + An INT64 value that sets the maximum number of tokens in the |
| 474 | + generated text. |
| 475 | + top_k (int, optional): |
| 476 | + An INT64 value that changes how the model selects tokens for |
| 477 | + output. A ``top_k`` of ``1`` means the next selected token is the |
| 478 | + most probable among all tokens in the model's vocabulary. A |
| 479 | + ``top_k`` of ``3`` means that the next token is selected from |
| 480 | + among the three most probable tokens by using temperature. The |
| 481 | + default value is ``40``. |
| 482 | + top_p (float, optional): |
| 483 | + A FLOAT64 value that changes how the model selects tokens for |
| 484 | + output. Tokens are selected from most probable to least probable |
| 485 | + until the sum of their probabilities equals the ``top_p`` value. |
| 486 | + For example, if tokens A, B, and C have a probability of 0.3, 0.2, |
| 487 | + and 0.1 and the ``top_p`` value is ``0.5``, then the model will |
| 488 | + select either A or B as the next token by using temperature. The |
| 489 | + default value is ``0.95``. |
| 490 | + flatten_json_output (bool, optional): |
| 491 | +            A BOOL value that determines whether the JSON content returned by the function is parsed into separate columns. |
| 492 | + safety_settings (Mapping[str, str], optional): |
| 493 | +            A mapping from safety category to threshold that configures |
| 494 | +            content filtering. Each entry is sent as a STRUCT with a STRING |
| 495 | +            ``category`` field and a STRING ``threshold`` field. |
| 496 | + stop_sequences (List[str], optional): |
| 497 | + An ARRAY<STRING> value that contains the stop sequences for the model. |
| 498 | + ground_with_google_search (bool, optional): |
| 499 | +            A BOOL value that determines whether the model uses grounding with Google Search when generating responses. |
| 500 | + model_params (Mapping[str, Any], optional): |
| 501 | +            A JSON value that contains additional parameters to pass to the model. |
| 502 | + request_type (str, optional): |
| 503 | +            A STRING value that specifies the type of inference request to send to the model, which determines the quota that the request uses. |
| 504 | +
|
| 505 | + Returns: |
| 506 | + bigframes.pandas.DataFrame: |
| 507 | + The generated text. |
| 508 | + """ |
| 509 | + import bigframes.pandas as bpd |
| 510 | + |
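| | +    # Resolve the model reference to a BQML model name and locate the session that owns the input. |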
| 511 | + model_name, session = _get_model_name_and_session(model, input_) |
| 512 | + table_sql = _to_sql(input_) |
| 513 | + |
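| | +    # Build the SQL for the BigQuery ML GENERATE_TEXT call with the requested options. |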
| 514 | + sql = bigframes.core.sql.ml.generate_text( |
| 515 | + model_name=model_name, |
| 516 | + table=table_sql, |
| 517 | + temperature=temperature, |
| 518 | + max_output_tokens=max_output_tokens, |
| 519 | + top_k=top_k, |
| 520 | + top_p=top_p, |
| 521 | + flatten_json_output=flatten_json_output, |
| 522 | + safety_settings=safety_settings, |
| 523 | + stop_sequences=stop_sequences, |
| 524 | + ground_with_google_search=ground_with_google_search, |
| 525 | + model_params=model_params, |
| 526 | + request_type=request_type, |
| 527 | + ) |
| 528 | + |
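| | +    # Execute on the input's session when one is attached; otherwise fall back to the global session. |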
| 529 | + if session is None: |
| 530 | + return bpd.read_gbq_query(sql) |
| 531 | + else: |
| 532 | + return session.read_gbq_query(sql) |