Integrate LlamaIndex.TS with Astra DB Serverless
LlamaIndex.TS can use Astra DB Serverless to store and retrieve vectors for ML applications.
Prerequisites
This guide requires the following:
- An active Astra account.
- An active Serverless (Vector) database.
- An application token with the Database Administrator role.
- Node.js 16.20.2 or later, and the required dependencies:
npm install llamaindex tsx
Connect to the database
- In the Astra Portal, go to Databases, and select your database.
- Make sure the database is in Active status, and then, in the Database Details section, click Generate Token.
- In the Application Token dialog, click Copy, and then store the token securely. The token format is AstraCS: followed by a unique token string. Application tokens created from Database Details have the Database Administrator role for the associated database.
- In Database Details, copy your database's API endpoint. The endpoint format is https://ASTRA_DB_ID-ASTRA_DB_REGION.apps.astra.datastax.com.
- In your terminal, assign your token and API endpoint to environment variables.

Linux or macOS:
export ASTRA_DB_API_ENDPOINT=API_ENDPOINT
export ASTRA_DB_APPLICATION_TOKEN=TOKEN
export OPENAI_API_KEY=API_KEY

Windows:
set ASTRA_DB_API_ENDPOINT=API_ENDPOINT
set ASTRA_DB_APPLICATION_TOKEN=TOKEN
set OPENAI_API_KEY=API_KEY

Google Colab:
import os
os.environ["ASTRA_DB_API_ENDPOINT"] = "API_ENDPOINT"
os.environ["ASTRA_DB_APPLICATION_TOKEN"] = "TOKEN"
os.environ["OPENAI_API_KEY"] = "API_KEY"
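Before running the examples, you can optionally confirm that Node.js can see these variables. The following is a minimal sketch; the check-env.ts filename is illustrative and not one of this guide's files.
check-env.ts
// A quick sanity check for the environment variables used in this guide.
// Run with: npx tsx check-env.ts
const required = [
  "ASTRA_DB_API_ENDPOINT",
  "ASTRA_DB_APPLICATION_TOKEN",
  "OPENAI_API_KEY",
];
for (const name of required) {
  // Report missing variables without printing their values
  if (!process.env[name]) {
    console.error(`Missing environment variable: ${name}`);
    process.exitCode = 1;
  }
}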
Load and split documents
- Download the text of Edgar Allan Poe's "The Cask of Amontillado" to be indexed in the vector store.
curl https://raw.githubusercontent.com/CassioML/cassio-website/main/docs/frameworks/langchain/texts/amontillado.txt \
  --output amontillado.txt
- Create load.ts in a src directory.
- Import your dependencies.
load.ts
import fs from "node:fs/promises";
import {
  AstraDBVectorStore,
  Document,
  VectorStoreIndex,
  storageContextFromDefaults,
} from "llamaindex";

// ...
- Create a main function. This function loads your .txt file into a Document object, creates a vector store, and stores the embeddings in a VectorStoreIndex. Wrapping the function in async allows the use of await to execute non-blocking calls to the database. If you want explicit control over how the text is split, see the chunking sketch after these steps.
load.ts
// ...
const collectionName = "amontillado";

async function main() {
  try {
    // Load the text file downloaded earlier
    const path = "./amontillado.txt";
    const essay = await fs.readFile(path, "utf-8");

    // Create a Document object from the text file
    const document = new Document({ text: essay, id_: path });

    // Initialize the Astra DB vector store and connect
    const astraVS = new AstraDBVectorStore({
      params: {
        token: process.env.ASTRA_DB_APPLICATION_TOKEN,
        endpoint: process.env.ASTRA_DB_API_ENDPOINT,
      },
    });
    await astraVS.create(collectionName, {
      vector: { dimension: 1536, metric: "cosine" },
    });
    await astraVS.connect(collectionName);

    // Create embeddings and store them in a VectorStoreIndex
    const ctx = await storageContextFromDefaults({ vectorStore: astraVS });
    const index = await VectorStoreIndex.fromDocuments([document], {
      storageContext: ctx,
    });
  } catch (e) {
    console.error(e);
  }
}

main();
- Compile and run the code you defined earlier.
npx tsx src/load.ts
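VectorStoreIndex.fromDocuments splits the text with LlamaIndex's default settings before embedding it. If you want explicit control over chunking, a sketch like the following should work in load.ts, replacing the fromDocuments call and adding serviceContextFromDefaults to the llamaindex import. The chunkSize and chunkOverlap values are illustrative, and passing them through serviceContextFromDefaults is an assumption about this version of llamaindex.
// Optional: build a service context with explicit chunking
// (illustrative values; defaults are applied if you omit this).
const serviceContext = serviceContextFromDefaults({
  chunkSize: 512,   // size of each chunk (assumed option name)
  chunkOverlap: 20, // overlap between adjacent chunks (assumed option name)
});
const index = await VectorStoreIndex.fromDocuments([document], {
  storageContext: ctx,
  serviceContext,
});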
Chat with your documents
- Create chat.ts in a src directory.
- Import your dependencies.
chat.ts
import { createInterface, Interface } from "node:readline";
import {
  AstraDBVectorStore,
  serviceContextFromDefaults,
  VectorStoreIndex,
  ContextChatEngine,
} from "llamaindex";

// ...
- Create a main function. This code is separate from load.ts so that you can tune your query and prompt independently.
- Complete the main function. This might look like a lot of code, but most of the logic is for setting up the chat interaction loop. The code specific to your LlamaIndex.TS integration is:
- A new AstraDBVectorStore instance called astraVS is created and connects to the amontillado collection you populated earlier.
- const index creates an index over your vector store with the default service context. For more on LlamaIndex's service context, see Service Context.
- The retriever returns the top 20 results from the index of the vector store.
- The chat engine uses the retriever to respond to user input.
chat.ts
// ...
const collectionName = "amontillado";

// Check if the input is a quit command
function isQuit(question: string): boolean {
  return ["q", "quit", "exit"].includes(question.trim().toLowerCase());
}

// Get user input as a promise
function getUserInput(readline: Interface): Promise<string> {
  return new Promise(resolve => {
    readline.question("What would you like to know?\n> ", userInput => {
      resolve(userInput);
    });
  });
}

async function main() {
  const readline = createInterface({
    input: process.stdin,
    output: process.stdout,
  });
  try {
    // Connect to the Astra DB vector store populated by load.ts
    const astraVS = new AstraDBVectorStore({
      params: {
        token: process.env.ASTRA_DB_APPLICATION_TOKEN,
        endpoint: process.env.ASTRA_DB_API_ENDPOINT,
      },
    });
    await astraVS.connect(collectionName);

    // Set up the index, retriever, and chat engine
    const ctx = serviceContextFromDefaults();
    const index = await VectorStoreIndex.fromVectorStore(astraVS, ctx);
    const retriever = await index.asRetriever({ similarityTopK: 20 });
    const chatEngine = new ContextChatEngine({ retriever });

    // Chat loop
    let question = "";
    while (!isQuit(question)) {
      question = await getUserInput(readline);
      if (isQuit(question)) {
        readline.close();
        process.exit(0);
      }
      try {
        const answer = await chatEngine.chat({ message: question });
        console.log(answer.response);
      } catch (error) {
        console.error("Error:", error);
      }
    }
  } catch (err) {
    console.error(err);
    console.log("If your Astra DB initialization failed, make sure to set the ASTRA_DB_APPLICATION_TOKEN, ASTRA_DB_API_ENDPOINT, and OPENAI_API_KEY environment variables.");
    process.exit(1);
  }
}

main().catch(console.error);
Compile and run the code you defined earlier.
npx tsx src/chat.ts
If you get a TOO_MANY_COLLECTIONS error, use the Data API command below or see Delete an existing collection to delete a collection and make room.
curl -sS -L -X POST "ASTRA_DB_API_ENDPOINT/api/json/v1/ASTRA_DB_KEYSPACE" \
  --header "Token: ASTRA_DB_APPLICATION_TOKEN" \
  --header "Content-Type: application/json" \
  --data '{
    "deleteCollection": {
      "name": "COLLECTION_NAME"
    }
  }'
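If you prefer to stay in TypeScript, the same cleanup can be done with the @datastax/astra-db-ts client. This is a sketch and assumes that package is installed (npm install @datastax/astra-db-ts); it is not otherwise used in this guide.
drop-collection.ts
// Delete a collection to make room (illustrative helper file)
import { DataAPIClient } from "@datastax/astra-db-ts";

async function main() {
  const client = new DataAPIClient(process.env.ASTRA_DB_APPLICATION_TOKEN!);
  const db = client.db(process.env.ASTRA_DB_API_ENDPOINT!);

  // Replace COLLECTION_NAME with the collection you want to delete
  await db.dropCollection("COLLECTION_NAME");
  console.log("Collection deleted");
}

main().catch(console.error);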
Complete code examples
load.ts
import fs from "node:fs/promises";
import {
  AstraDBVectorStore,
  Document,
  VectorStoreIndex,
  storageContextFromDefaults,
} from "llamaindex";

const collectionName = "amontillado";

async function main() {
  try {
    // Load the text file downloaded earlier
    const path = "./amontillado.txt";
    const essay = await fs.readFile(path, "utf-8");

    // Create a Document object from the text file
    const document = new Document({ text: essay, id_: path });

    // Initialize the Astra DB vector store and connect
    const astraVS = new AstraDBVectorStore({
      params: {
        token: process.env.ASTRA_DB_APPLICATION_TOKEN,
        endpoint: process.env.ASTRA_DB_API_ENDPOINT,
      },
    });
    await astraVS.create(collectionName, {
      vector: { dimension: 1536, metric: "cosine" },
    });
    await astraVS.connect(collectionName);

    // Create embeddings and store them in a VectorStoreIndex
    const ctx = await storageContextFromDefaults({ vectorStore: astraVS });
    const index = await VectorStoreIndex.fromDocuments([document], {
      storageContext: ctx,
    });
  } catch (e) {
    console.error(e);
  }
}

main();
chat.ts
import { createInterface, Interface } from "node:readline";
import {
  AstraDBVectorStore,
  serviceContextFromDefaults,
  VectorStoreIndex,
  ContextChatEngine,
} from "llamaindex";

const collectionName = "amontillado";

// Check if the input is a quit command
function isQuit(question: string): boolean {
  return ["q", "quit", "exit"].includes(question.trim().toLowerCase());
}

// Get user input as a promise
function getUserInput(readline: Interface): Promise<string> {
  return new Promise(resolve => {
    readline.question("What would you like to know?\n> ", userInput => {
      resolve(userInput);
    });
  });
}

async function main() {
  const readline = createInterface({
    input: process.stdin,
    output: process.stdout,
  });
  try {
    // Connect to the Astra DB vector store populated by load.ts
    const astraVS = new AstraDBVectorStore({
      params: {
        token: process.env.ASTRA_DB_APPLICATION_TOKEN,
        endpoint: process.env.ASTRA_DB_API_ENDPOINT,
      },
    });
    await astraVS.connect(collectionName);

    // Set up the index, retriever, and chat engine
    const ctx = serviceContextFromDefaults();
    const index = await VectorStoreIndex.fromVectorStore(astraVS, ctx);
    const retriever = await index.asRetriever({ similarityTopK: 20 });
    const chatEngine = new ContextChatEngine({ retriever });

    // Chat loop
    let question = "";
    while (!isQuit(question)) {
      question = await getUserInput(readline);
      if (isQuit(question)) {
        readline.close();
        process.exit(0);
      }
      try {
        const answer = await chatEngine.chat({ message: question });
        console.log(answer.response);
      } catch (error) {
        console.error("Error:", error);
      }
    }
  } catch (err) {
    console.error(err);
    console.log("If your Astra DB initialization failed, make sure to set the ASTRA_DB_APPLICATION_TOKEN, ASTRA_DB_API_ENDPOINT, and OPENAI_API_KEY environment variables.");
    process.exit(1);
  }
}

main().catch(console.error);
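As a possible refinement, ContextChatEngine can stream tokens as they arrive instead of waiting for the complete answer. The following sketch replaces the chat call inside the loop; it assumes the stream option and per-chunk response deltas behave this way in this version of llamaindex.
// Streaming variant of the inner chat call (sketch; assumed API behavior)
const stream = await chatEngine.chat({ message: question, stream: true });
for await (const chunk of stream) {
  process.stdout.write(chunk.response); // each chunk carries a delta of the answer
}
process.stdout.write("\n");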