From 31c72561e5743744ef12db60569e158e0058943a Mon Sep 17 00:00:00 2001 From: Alex Rattray Date: Tue, 27 Jun 2023 14:47:39 -0400 Subject: [PATCH] docs: describe migration from v0.2 to v0.3 (#46) * docs: describe migration * docs: clean up examples * docs: add trailing newlines * docs: update migration guide --------- Co-authored-by: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Co-authored-by: Robert Craigie --- README.md | 66 ++++++++++++++++++++++++++++++++++++++++++- api.md | 2 +- examples/streaming.py | 4 +-- 3 files changed, 68 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index 44f3ff95..2bddf49b 100644 --- a/README.md +++ b/README.md @@ -6,6 +6,70 @@ The Anthropic Python library provides convenient access to the Anthropic REST AP application. It includes type definitions for all request params and response fields, and offers both synchronous and asynchronous clients powered by [httpx](https://github.com/encode/httpx). +## Migration from v0.2.x and below + +In `v0.3.0`, we introduced a fully rewritten SDK. 
+
+The new version uses separate sync and async clients, unified streaming, typed params and structured response objects, and resource-oriented methods:
+
+**Sync before/after:**
+
+```diff
+- client = anthropic.Client(os.environ["ANTHROPIC_API_KEY"])
++ client = anthropic.Anthropic(api_key=os.environ["ANTHROPIC_API_KEY"])
+ # or, simply provide an ANTHROPIC_API_KEY environment variable:
++ client = anthropic.Anthropic()
+
+- rsp = client.completion(**params)
+- rsp["completion"]
++ rsp = client.completions.create(**params)
++ rsp.completion
+```
+
+**Async before/after:**
+
+```diff
+- client = anthropic.Client(os.environ["ANTHROPIC_API_KEY"])
++ client = anthropic.AsyncAnthropic(api_key=os.environ["ANTHROPIC_API_KEY"])
+
+- await client.acompletion(**params)
++ await client.completions.create(**params)
+```
+
+The `.completion_stream()` and `.acompletion_stream()` methods have been removed;
+simply pass `stream=True` to `.completions.create()`.
+
+Streaming responses are now incremental; the full text is not sent in each message,
+as v0.3 sends the `Anthropic-Version: 2023-06-01` header.
+
+<details>
+<summary>Example streaming diff</summary>
+
+```diff py
+ import anthropic
+
+- client = anthropic.Client(os.environ["ANTHROPIC_API_KEY"])
++ client = anthropic.Anthropic()
+
+ # Streams are now incremental diffs of text
+ # rather than sending the whole message every time:
+ text = ""
+- stream = client.completion_stream(**params)
+- for data in stream:
+- diff = data["completion"].replace(text, "")
+- text = data["completion"]
++ stream = client.completions.create(**params, stream=True)
++ for data in stream:
++ diff = data.completion # incremental text
++ text += data.completion
+ print(diff, end="")
+
+ print("Done. Final text is:")
+ print(text)
+```
+
+</details>
+ ## Documentation The API documentation can be found [here](https://docs.anthropic.com/claude/reference/). @@ -249,4 +313,4 @@ We are keen for your feedback; please open an [issue](https://www.github.com/ant ## Requirements -Python 3.7 or higher. \ No newline at end of file +Python 3.7 or higher. diff --git a/api.md b/api.md index 30d3a2c7..83650cb0 100644 --- a/api.md +++ b/api.md @@ -15,4 +15,4 @@ from anthropic.types import Completion Methods: - client.completions.create(\*\*params) -> Completion -- client.completions.create(\*\*params) -> Completion \ No newline at end of file +- client.completions.create(\*\*params) -> Completion diff --git a/examples/streaming.py b/examples/streaming.py index 12c7d2f8..16e1230f 100644 --- a/examples/streaming.py +++ b/examples/streaming.py @@ -14,7 +14,7 @@ def sync_stream() -> None: stream = client.completions.create( - prompt=f"\n\nHuman: {question}\n\nAssistant:", + prompt=f"{HUMAN_PROMPT} {question}{AI_PROMPT}", model="claude-v1", stream=True, max_tokens_to_sample=300, @@ -28,7 +28,7 @@ def sync_stream() -> None: async def async_stream() -> None: stream = await async_client.completions.create( - prompt=f"{HUMAN_PROMPT}{question}{AI_PROMPT}", + prompt=f"{HUMAN_PROMPT} {question}{AI_PROMPT}", model="claude-v1", stream=True, max_tokens_to_sample=300,