Commit 99233b73 authored by Your Name

add example

parent 30c051ce
import { check } from "k6";
import encoding from "k6/encoding";
// Exercises the four base64 variants supported by k6/encoding:
// std / rawstd (ASCII input) and url / rawurl (multi-byte UTF-8 input).
export default function () {
  // Standard base64 with '=' padding.
  const plainAscii = "hello world";
  const paddedStd = "aGVsbG8gd29ybGQ=";
  check(null, {
    "is std encoding correct": () => encoding.b64encode(plainAscii) === paddedStd,
    "is std decoding correct": () => encoding.b64decode(paddedStd, null, "s") === plainAscii,
  });

  // Standard base64 without '=' padding.
  const rawStd = "aGVsbG8gd29ybGQ";
  check(null, {
    "is rawstd encoding correct": () => encoding.b64encode(plainAscii, "rawstd") === rawStd,
    "is rawstd decoding correct": () => encoding.b64decode(rawStd, "rawstd", "s") === plainAscii,
  });

  // URL-safe base64 with '=' padding ('-'/'_' instead of '+'/'/').
  const plainUtf8 = "小飼弾..";
  const paddedUrl = "5bCP6aO85by-Li4=";
  check(null, {
    "is url encoding correct": () => encoding.b64encode(plainUtf8, "url") === paddedUrl,
    "is url decoding correct": () => encoding.b64decode(paddedUrl, "url", "s") === plainUtf8,
  });

  // URL-safe base64 without '=' padding.
  const rawUrl = "5bCP6aO85by-Li4";
  check(null, {
    "is rawurl encoding correct": () => encoding.b64encode(plainUtf8, "rawurl") === rawUrl,
    "is rawurl decoding correct": () => encoding.b64decode(rawUrl, "rawurl", "s") === plainUtf8,
  });
};
{
"vus": 100,
"collectors": {
"influxdb": {
"tagsAsFields": ["vu","iter", "url", "name"]
}
}
}
import http from "k6/http";
import { check, group } from "k6";
export let options = {
maxRedirects: 3
};
// Demonstrates the per-VU cookie jar and explicit local CookieJar instances:
// request-scoped cookies, jar-persisted cookies, and domain/path attributes.
export default function() {
  // ----- VU cookie jar -----
  group("Simple cookies send with VU jar", function() {
    let cookies = {
      name: "value1",
      name2: "value2"
    };
    let res = http.get("http://httpbin.org/cookies", { cookies: cookies });
    check(res, {
      "status is 200": (r) => r.status === 200,
      "has cookie 'name'": (r) => r.json().cookies.name.length > 0,
      "has cookie 'name2'": (r) => r.json().cookies.name2.length > 0
    });
    // Since the cookies are set as "request cookies" they won't be added to VU cookie jar
    let vuJar = http.cookieJar();
    let cookiesForURL = vuJar.cookiesForURL(res.url);
    check(null, {
      "vu jar doesn't have cookie 'name'": () => cookiesForURL.name === undefined,
      "vu jar doesn't have cookie 'name2'": () => cookiesForURL.name2 === undefined
    });
  });

  group("Simple cookies set with VU jar", function() {
    // Since this request redirects the `res.cookies` property won't contain the cookies
    let res = http.get("http://httpbin.org/cookies/set?name3=value3&name4=value4");
    check(res, {
      "status is 200": (r) => r.status === 200
    });
    // Make sure cookies have been added to VU cookie jar
    let vuJar = http.cookieJar();
    let cookiesForURL = vuJar.cookiesForURL(res.url);
    check(null, {
      "vu jar has cookie 'name3'": () => cookiesForURL.name3.length > 0,
      "vu jar has cookie 'name4'": () => cookiesForURL.name4.length > 0
    });
  });

  // ----- Local cookie jar -----
  group("Simple cookies send with local jar", function() {
    let jar = new http.CookieJar();
    let cookies = {
      name5: "value5",
      name6: "value6"
    };
    let res = http.get("http://httpbin.org/cookies", { cookies: cookies, jar: jar });
    check(res, {
      "status is 200": (r) => r.status === 200,
      "has cookie 'name5'": (r) => r.json().cookies.name5.length > 0,
      "has cookie 'name6'": (r) => r.json().cookies.name6.length > 0
    });
    // Since the cookies are set as "request cookies" they won't be added to the local jar
    let cookiesForURL = jar.cookiesForURL(res.url);
    check(null, {
      "local jar doesn't have cookie 'name5'": () => cookiesForURL.name5 === undefined,
      "local jar doesn't have cookie 'name6'": () => cookiesForURL.name6 === undefined
    });
    // Make sure cookies have NOT been added to VU cookie jar.
    // BUG FIX: these checks previously inspected `.name`/`.name2`, which are
    // never set in this group, so they passed vacuously; test name5/name6.
    let vuJar = http.cookieJar();
    cookiesForURL = vuJar.cookiesForURL(res.url);
    check(null, {
      "vu jar doesn't have cookie 'name5'": () => cookiesForURL.name5 === undefined,
      "vu jar doesn't have cookie 'name6'": () => cookiesForURL.name6 === undefined
    });
  });

  group("Advanced send with local jar", function() {
    let jar = new http.CookieJar();
    jar.set("http://httpbin.org/cookies", "name7", "value7");
    jar.set("http://httpbin.org/cookies", "name8", "value8");
    let res = http.get("http://httpbin.org/cookies", { jar: jar });
    check(res, {
      "status is 200": (r) => r.status === 200,
      "has cookie 'name7'": (r) => r.json().cookies.name7.length > 0,
      "has cookie 'name8'": (r) => r.json().cookies.name8.length > 0
    });
    // Jar-set cookies persist in the local jar (single lookup; the original
    // computed cookiesForURL twice with the same expression).
    let cookiesForURL = jar.cookiesForURL(res.url);
    check(null, {
      "local jar has cookie 'name7'": () => cookiesForURL.name7.length > 0,
      "local jar has cookie 'name8'": () => cookiesForURL.name8.length > 0
    });
    // Make sure cookies have NOT been added to VU cookie jar
    let vuJar = http.cookieJar();
    cookiesForURL = vuJar.cookiesForURL(res.url);
    check(null, {
      "vu jar doesn't have cookie 'name7'": () => cookiesForURL.name7 === undefined,
      "vu jar doesn't have cookie 'name8'": () => cookiesForURL.name8 === undefined
    });
  });

  group("Advanced cookie attributes", function() {
    let jar = http.cookieJar();
    // Cookie scoped to the matching domain and path: sent with the request.
    jar.set("http://httpbin.org/cookies", "name9", "value9", { domain: "httpbin.org", path: "/cookies" });
    let res = http.get("http://httpbin.org/cookies", { jar: jar });
    check(res, {
      "status is 200": (r) => r.status === 200,
      "has cookie 'name9'": (r) => r.json().cookies.name9 === "value9"
    });
    // Cookie scoped to a different domain: must NOT be sent to httpbin.org.
    jar.set("http://httpbin.org/cookies", "name10", "value10", { domain: "example.com", path: "/" });
    res = http.get("http://httpbin.org/cookies", { jar: jar });
    check(res, {
      "status is 200": (r) => r.status === 200,
      "doesn't have cookie 'name10'": (r) => r.json().cookies.name10 === undefined
    });
  });
}
import crypto from 'k6/crypto';
// Demonstrates both the shorthand and the incremental hashing APIs of k6/crypto.
export default function () {
  // Shorthand: hash a string in a single call.
  const digestHex = crypto.sha1("some text", "hex");
  console.log(digestHex);

  // Flexible: feed data into a hasher, then read the digest in two encodings.
  const sha1Hasher = crypto.createHash("sha1");
  sha1Hasher.update("some other text");
  console.log(sha1Hasher.digest("hex"));
  console.log(sha1Hasher.digest("base64"));
}
import http from "k6/http";
import { Counter, Gauge, Rate, Trend } from "k6/metrics";
import { check } from "k6";
/*
* Custom metrics are useful when you want to track something that is not
* provided out of the box.
*
* There are four types of custom metrics: Counter, Gauge, Rate and Trend.
*
* - Counter: a sum of all values added to the metric
* - Gauge: a value that change to whatever you set it to
* - Rate: rate of "truthiness", how many values out of total are !=0
* - Trend: time series, all values are recorded, statistics can be calculated
* on it
*/
// One instance of each custom metric type, created in the init context.
const myCounter = new Counter("my_counter");
const myGauge = new Gauge("my_gauge");
const myRate = new Rate("my_rate");
const myTrend = new Trend("my_trend");

// Highest response duration this VU has observed so far (milliseconds).
let maxResponseTime = 0.0;

export default function () {
  const response = http.get("http://httpbin.org/");
  const checksPassed = check(response, { "status is 200": (r) => r.status === 200 });

  // Count one request per iteration.
  myCounter.add(1);
  console.log(myCounter.name, " is config ready");

  // Gauge keeps the running maximum response time.
  maxResponseTime = Math.max(maxResponseTime, response.timings.duration);
  myGauge.add(maxResponseTime);

  // Feed the check outcome (true/false) into the rate metric.
  myRate.add(checksPassed);

  // Trend sample: TCP connect plus TLS handshake portions of the timing.
  myTrend.add(response.timings.connecting + response.timings.tls_handshaking);
}
import { group, check, sleep } from "k6";
import { Counter, Rate } from "k6/metrics";
import http from "k6/http";
export let options = {
vus: 5,
thresholds: {
my_rate: ["rate>=0.4"], // Require my_rate's success rate to be >=40%
http_req_duration: ["avg<1000"], // Require http_req_duration's average to be <1000ms
}
};
let mCounter = new Counter("my_counter");
let mRate = new Rate("my_rate");
// Exercises checks at several nesting depths — top level, inside a group,
// and inside nested sub-groups — feeding custom metrics along the way.
export default function () {
  check(Math.random(), {
    "top-level test": (value) => value < 1 / 3,
  });

  group("my group", function () {
    // A tagged counter sample, plus a rate fed from the check outcome.
    mCounter.add(1, { tag: "test" });
    check(Math.random(), {
      "random value is < 0.5": (value) => mRate.add(value < 0.5),
    });

    group("json", function () {
      const response = http.get("https://httpbin.org/get", {
        headers: { "X-Test": "abc123" },
      });
      check(response, {
        "status is 200": (r) => r.status === 200,
        "X-Test header is correct": (r) => r.json().headers['X-Test'] === "abc123",
      });
    });

    group("html", function () {
      check(http.get("http://test.k6.io/"), {
        "status is 200": (r) => r.status === 200,
        "content type is html": (r) => r.headers['Content-Type'].startsWith("text/html"),
        "welcome message is correct": (r) => r.html("p.description").text() === "Collection of simple web-pages suitable for load testing.",
      });
    });

    group("nested", function () {
      // Non-callable check values are evaluated for truthiness as-is.
      check(null, {
        "always passes": true,
        "always fails": false,
      });
    });
  });

  // Random pacing of up to 10 seconds between iterations.
  sleep(10 * Math.random());
};
import { check } from 'k6';
import { chromium } from 'k6/experimental/browser';
export const options = {
thresholds: {
checks: ["rate==1.0"]
}
}
// Browser example: navigate to test.k6.io, follow the messages link, log in,
// and verify the welcome header. Runs fully asynchronously per iteration.
export default async function() {
// XK6_HEADLESS toggles headless mode; any non-empty value means headless.
const browser = chromium.launch({
headless: __ENV.XK6_HEADLESS ? true : false,
});
const context = browser.newContext();
const page = context.newPage();
try {
// Goto front page, find login link and click it
await page.goto('https://test.k6.io/', { waitUntil: 'networkidle' });
// Register the navigation waiter BEFORE clicking to avoid missing the event.
await Promise.all([
page.waitForNavigation(),
page.locator('a[href="/my_messages.php"]').click(),
]);
// Enter login credentials and login
page.locator('input[name="login"]').type('admin');
page.locator('input[name="password"]').type('123');
// We expect the form submission to trigger a navigation, so to prevent a
// race condition, setup a waiter concurrently while waiting for the click
// to resolve.
await Promise.all([
page.waitForNavigation(),
page.locator('input[type="submit"]').click(),
]);
// NOTE(review): the check value is a plain boolean evaluated eagerly (not a
// callback), compared with loose == — presumably intentional; verify that
// textContent() returns a string synchronously in this k6 browser version.
check(page, {
'header': page.locator('h2').textContent() == 'Welcome, admin!',
});
} finally {
// Always release the page and browser, even if a step above throws.
page.close();
browser.close();
}
}
version: '3.3'
services:
k6-experimental-redis:
image: 'redis:alpine'
ports:
- '6379:6379'
command: redis-server
Bonjour, tout le monde!
\ No newline at end of file
import { open, SeekMode } from "k6/experimental/fs";
export const options = {
vus: 100,
iterations: 1000,
};
// k6 doesn't support async in the init context. We use a top-level async function for `await`.
//
// Each Virtual User gets its own `file` copy.
// So, operations like `seek` or `read` won't impact other VUs.
let file;
(async function () {
file = await open("bonjour.txt");
})();
// Reads the shared `file` handle in 4-byte chunks, verifies the total byte
// count against stat(), then rewinds so the next iteration starts at offset 0.
export default async function () {
// Obtain information (name, size) about the file.
const fileinfo = await file.stat();
if (fileinfo.name != "bonjour.txt") {
throw new Error("Unexpected file name");
}
const buffer = new Uint8Array(4);
let totalBytesRead = 0;
while (true) {
// Read into the buffer
const bytesRead = await file.read(buffer);
if (bytesRead == null) {
// EOF
break;
}
// Do something useful with the content of the buffer.
// NOTE(review): on the final chunk only the first `bytesRead` bytes of
// `buffer` are valid; bytes past that are stale from the previous read.
totalBytesRead += bytesRead;
// If bytesRead is less than the buffer size, we've read the whole file
if (bytesRead < buffer.byteLength) {
break;
}
}
// Check that we read the expected number of bytes
if (totalBytesRead != fileinfo.size) {
throw new Error("Unexpected number of bytes read");
}
// Seek back to the beginning of the file
await file.seek(0, SeekMode.Start);
}
import { check } from "k6";
import http from "k6/http";
import redis from "k6/experimental/redis";
import exec from "k6/execution";
import { textSummary } from "https://jslib.k6.io/k6-summary/0.0.1/index.js";
export const options = {
scenarios: {
redisPerformance: {
executor: "shared-iterations",
vus: 10,
iterations: 200,
exec: "measureRedisPerformance",
},
usingRedisData: {
executor: "shared-iterations",
vus: 10,
iterations: 200,
exec: "measureUsingRedisData",
},
},
};
// Get the redis instance(s) address and password from the environment
const redis_addrs = __ENV.REDIS_ADDRS || "";
const redis_password = __ENV.REDIS_PASSWORD || "";

// Instantiate a new redis client.
// `addrs` entries are 'host:port' strings, comma-separated in REDIS_ADDRS.
// BUG FIX: String.split() always returns a non-empty (truthy) array — even
// for "" it returns [""] — so `split(...) || fallback` could never fall back
// and an unset REDIS_ADDRS produced the bogus address "". Test the raw env
// value instead.
const redisClient = new redis.Client({
  addrs: redis_addrs ? redis_addrs.split(",") : ["localhost:6379"],
  password: redis_password,
});
// Prepare an array of crocodile ids for later use
// in the context of the measureUsingRedisData function.
const crocodileIDs = new Array(0, 1, 2, 3, 4, 5, 6, 7, 8, 9);
// Scenario "redisPerformance": run a set/get/incrBy/del/exists round-trip
// against redis and assert the key is gone at the end.
export function measureRedisPerformance() {
  // VUs are executed in a parallel fashion,
  // thus, to ensure that parallel VUs are not
  // modifying the same key at the same time,
  // we use keys indexed by the VU id.
  // FIX: `key` was computed but unused — every step rebuilt the same
  // template literal; reuse the constant instead.
  const key = `foo-${exec.vu.idInTest}`;
  redisClient
    .set(key, 1)
    .then(() => redisClient.get(key))
    .then((value) => redisClient.incrBy(key, value))
    .then((_) => redisClient.del(key))
    .then((_) => redisClient.exists(key))
    .then((exists) => {
      if (exists !== 0) {
        throw new Error("foo should have been deleted");
      }
    });
}
// setup() runs once before the test: seed the shared set of crocodile ids.
// NOTE(review): the sadd() promise is not awaited here — the insert is
// fire-and-forget; confirm it completes before the first iteration needs it.
export function setup() {
redisClient.sadd("crocodile_ids", ...crocodileIDs);
}
// Scenario "usingRedisData": pick a random crocodile id from the redis set
// filled by setup(), fetch that crocodile over HTTP, and count fetches per
// URL in the "k6_crocodile_fetched" redis hash.
export function measureUsingRedisData() {
// Pick a random crocodile id from the dedicated redis set,
// we have filled in setup().
redisClient
.srandmember("crocodile_ids")
.then((randomID) => {
const url = `https://test-api.k6.io/public/crocodiles/${randomID}`;
const res = http.get(url);
check(res, {
"status is 200": (r) => r.status === 200,
"content-type is application/json": (r) =>
r.headers["content-type"] === "application/json",
});
// Pass the URL down the chain so the counter below can key on it.
return url;
})
.then((url) => redisClient.hincrby("k6_crocodile_fetched", url, 1));
}
// teardown() runs once after the test: drop the shared id set from setup().
// NOTE(review): the del() promise is not awaited; deletion is best-effort.
export function teardown() {
redisClient.del("crocodile_ids");
}
// handleSummary() builds the end-of-test report: merges the per-URL fetch
// counts from redis into the summary data, persists a timestamped JSON copy
// of the report in redis, and prints the standard text summary to stdout.
// NOTE(review): the redis chain is not awaited, so the `return` below may run
// before `data` has been augmented with k6_crocodile_fetched — confirm this
// race is acceptable for the stdout summary.
export function handleSummary(data) {
redisClient
.hgetall("k6_crocodile_fetched")
.then((fetched) =>
Object.assign(data, { k6_crocodile_fetched: fetched })
)
.then((data) =>
redisClient.set(`k6_report_${Date.now()}`, JSON.stringify(data))
)
.then(() => redisClient.del("k6_crocodile_fetched"));
return {
stdout: textSummary(data, { indent: " ", enableColors: true }),
};
}
// based on https://developer.mozilla.org/en-US/docs/Web/API/setTimeout#reasons_for_delays_longer_than_specified
import { setTimeout } from "k6/experimental/timers";
let last = 0;
let iterations = 10;
// Logs the time of each firing, then re-arms itself with a 0ms delay until
// the module-level `iterations` counter is exhausted (post-decrement, so the
// body runs once more after the counter reaches zero).
function timeout() {
// log the time of this call
logline(new Date().getMilliseconds());
// if we are not finished, schedule the next call
if (iterations-- > 0) {
setTimeout(timeout, 0);
}
}
// Entry point: reset the shared counters, then kick off the self-rescheduling
// timer chain with a nominal 0ms delay.
export default function () {
  last = new Date().getMilliseconds();
  iterations = 10;
  setTimeout(timeout, 0);
}
// Left-pad a number with zeros to a minimum width of three characters.
function pad(number) {
  return String(number).padStart(3, "0");
}
// Prints the previous timestamp, the current one, and their delta (ms),
// then advances the module-level `last` marker for the next call.
function logline(now) {
// log the last timestamp, the new timestamp, and the difference
console.log(`${pad(last)} ${pad(now)} ${now - last}`);
last = now;
}
import http from "k6/http";
import { check } from "k6";
import tracing from "k6/experimental/tracing";
// Explicitly instantiating a tracing client makes it possible to distinguish
// instrumented from non-instrumented HTTP calls, by keeping the APIs separate.
// It also allows finer-grained configuration control, by letting users change
// the tracing configuration on the fly during their script's execution.
let instrumentedHTTP = new tracing.Client({
propagator: "w3c",
});
const testData = { name: "Bert" };
// Mixes traced and untraced requests in one iteration to show that only calls
// made through the tracing client instance carry trace-context headers.
export default () => {
// Using the tracing client instance, HTTP calls will have
// their trace context headers set.
let res = instrumentedHTTP.request("GET", "http://httpbin.org/get", null, {
headers: {
"X-Example-Header": "instrumented/request",
},
});
check(res, {
"status is 200": (r) => r.status === 200,
});
// The tracing client offers more flexibility over
// the `instrumentHTTP` function, as it leaves the
// imported standard http module untouched. Thus,
// one can still perform non-instrumented HTTP calls
// using it.
res = http.post("http://httpbin.org/post", JSON.stringify(testData), {
headers: { "X-Example-Header": "noninstrumented/post" },
});
check(res, {
"status is 200": (r) => r.status === 200,
});
// DELETE through the tracing client: also carries trace-context headers.
res = instrumentedHTTP.del("http://httpbin.org/delete", null, {
headers: { "X-Example-Header": "instrumented/delete" },
});
check(res, {
"status is 200": (r) => r.status === 200,
});
};
import http from "k6/http";
import { check } from "k6";
import tracing from "k6/experimental/tracing";
// instrumentHTTP will ensure that all requests made by the http module
// will be traced. The first argument is a configuration object that
// can be used to configure the tracer.
//
// Currently supported HTTP methods are: get, post, put, patch, head,
// del, options, and request.
tracing.instrumentHTTP({
propagator: "w3c",
});
// After tracing.instrumentHTTP() above, every call made through the standard
// http module is automatically traced — no special client object is needed.
export default () => {
  const response = http.get("http://httpbin.org/get", {
    headers: {
      "X-Example-Header": "instrumented/get",
    },
  });
  check(response, {
    "status is 200": (r) => r.status === 200,
  });
};
import http from "k6/http";
import { check } from "k6";
import tracing from "k6/experimental/tracing";
export const options = {
// As the number of sampled requests will converge towards
// the sampling percentage, we need to increase the number
// of iterations to get a more accurate result.
iterations: 10000,
vus: 100,
};
tracing.instrumentHTTP({
propagator: "w3c",
// Only 10% of the requests made will have their trace context
// header's sample flag set to activated.
sampling: 0.1,
});
// With `sampling: 0.1` configured above, roughly 10% of these requests carry
// an activated sample flag; the high iteration count makes the ratio converge.
export default () => {
  const response = http.get("http://httpbin.org/get", {
    headers: {
      "X-Example-Header": "instrumented/get",
    },
  });
  check(response, {
    "status is 200": (r) => r.status === 200,
  });
};
import { crypto } from "k6/experimental/webcrypto";
// Generates a fresh AES-CBC key, encrypts a UTF-16-encoded message with a
// random IV, decrypts it again, and logs whether the round trip is lossless.
export default async function () {
const key = await crypto.subtle.generateKey(
{
name: "AES-CBC",
length: 256,
},
true,
["encrypt", "decrypt"]
);
const encoded = stringToArrayBuffer("Hello, World!");
// AES-CBC requires a 16-byte initialization vector; use a random one.
const iv = crypto.getRandomValues(new Uint8Array(16));
const ciphertext = await crypto.subtle.encrypt(
{
name: "AES-CBC",
iv: iv,
},
key,
encoded
);
// Decrypt with the same key and IV to recover the plaintext bytes.
const plaintext = await crypto.subtle.decrypt(
{
name: "AES-CBC",
iv: iv,
},
key,
ciphertext,
);
// Compare hex renderings since ArrayBuffers are not directly comparable.
console.log("deciphered text == original text: ", arrayBufferToHex(plaintext) === arrayBufferToHex(encoded))
}
// Render an ArrayBuffer as a lowercase hex string (two digits per byte).
function arrayBufferToHex(buffer) {
  const bytes = new Uint8Array(buffer);
  let hex = "";
  for (const byte of bytes) {
    hex += byte.toString(16).padStart(2, "0");
  }
  return hex;
}
// Encode a string into an ArrayBuffer of UTF-16 code units (2 bytes per char),
// mirroring JavaScript's internal string representation.
function stringToArrayBuffer(str) {
  const codeUnits = new Uint16Array(str.length);
  for (let i = 0; i < str.length; i++) {
    codeUnits[i] = str.charCodeAt(i);
  }
  return codeUnits.buffer;
}
\ No newline at end of file
import {
randomString,
randomIntBetween,
} from "https://jslib.k6.io/k6-utils/1.1.0/index.js";
import { WebSocket } from "k6/experimental/websockets";
import {
setTimeout,
clearTimeout,
setInterval,
clearInterval,
} from "k6/experimental/timers";
let chatRoomName = "publicRoom"; // choose your chat room name
let sessionDuration = randomIntBetween(5000, 60000); // user session between 5s and 1m
// Each iteration spins up four concurrent chat participants in this VU.
export default function () {
  const workers = 4;
  for (let workerId = 0; workerId < workers; workerId++) {
    startWSWorker(workerId);
  }
}
// Simulates one chat participant: opens a WebSocket, announces a name, chats
// at a random cadence, leaves after `sessionDuration`, and force-closes the
// socket 3s later if the server has not closed it first.
function startWSWorker(id) {
let url = `wss://test-api.k6.io/ws/crocochat/${chatRoomName}/`;
let ws = new WebSocket(url);
ws.addEventListener("open", () => {
// Identify this client to the chat server.
ws.send(
JSON.stringify({
event: "SET_NAME",
new_name: `Croc ${__VU}:${id}`,
})
);
// Route incoming events: chat messages, server errors, everything else.
ws.addEventListener("message", (e) => {
let msg = JSON.parse(e.data);
if (msg.event === "CHAT_MSG") {
console.log(
`VU ${__VU}:${id} received: ${msg.user} says: ${msg.message}`
);
} else if (msg.event === "ERROR") {
console.error(`VU ${__VU}:${id} received:: ${msg.message}`);
} else {
console.log(
`VU ${__VU}:${id} received unhandled message: ${msg.message}`
);
}
});
// Periodically say something random in the room.
let intervalId = setInterval(() => {
ws.send(
JSON.stringify({
event: "SAY",
message: `I'm saying ${randomString(5)}`,
})
);
}, randomIntBetween(2000, 8000)); // say something every 2-8seconds
// After the session duration: stop chatting and leave gracefully.
let timeout1id = setTimeout(function () {
clearInterval(intervalId);
console.log(
`VU ${__VU}:${id}: ${sessionDuration}ms passed, leaving the chat`
);
ws.send(JSON.stringify({ event: "LEAVE" }));
}, sessionDuration);
// Safety net: force-close the socket if the server hasn't closed it by then.
let timeout2id = setTimeout(function () {
console.log(
`Closing the socket forcefully 3s after graceful LEAVE`
);
ws.close();
}, sessionDuration + 3000);
// On close (graceful or forced), cancel pending timers so the VU can finish.
ws.addEventListener("close", () => {
clearTimeout(timeout1id);
clearTimeout(timeout2id);
console.log(`VU ${__VU}:${id}: disconnected`);
});
});
}
This diff is collapsed.
import http from "k6/http";
import { sleep } from "k6";
let accessToken = "YOUR_GITHUB_ACCESS_TOKEN";
// Queries the GitHub GraphQL API for the first issue of grafana/k6 and, if
// one exists, posts a HOORAY reaction to it. Requires a valid access token.
export default function() {
  let query = `
query FindFirstIssue {
repository(owner:"grafana", name:"k6") {
issues(first:1) {
edges {
node {
id
number
title
}
}
}
}
}`;
  let headers = {
    'Authorization': `Bearer ${accessToken}`,
    "Content-Type": "application/json"
  };
  let res = http.post("https://api.github.com/graphql",
    JSON.stringify({ query: query }),
    {headers: headers}
  );
  if (res.status === 200) {
    console.log(JSON.stringify(res.body));
    let body = JSON.parse(res.body);
    // FIX: guard against an empty result set — `edges[0].node` would throw a
    // TypeError and abort the iteration when the repository has no issues.
    let edges = body.data.repository.issues.edges;
    if (edges.length > 0) {
      let issue = edges[0].node;
      console.log(issue.id, issue.number, issue.title);
      // Mutation: add a HOORAY reaction to the issue found above.
      let mutation = `
mutation AddReactionToIssue {
addReaction(input:{subjectId:"${issue.id}",content:HOORAY}) {
reaction {
content
}
subject {
id
}
}
}`;
      res = http.post("https://api.github.com/graphql",
        JSON.stringify({query: mutation}),
        {headers: headers}
      );
    }
  }
  // Light pacing between iterations.
  sleep(0.3);
}
import { Client, Stream } from 'k6/net/grpc';
import { sleep } from 'k6';
const COORD_FACTOR = 1e7;
// to run this sample, you need to start the grpc server first.
// to start the grpc server, run the following command in k6 repository's root:
// go run -mod=mod examples/grpc_server/*.go
// (golang should be installed)
const GRPC_ADDR = __ENV.GRPC_ADDR || '127.0.0.1:10000';
const GRPC_PROTO_PATH = __ENV.GRPC_PROTO_PATH || '../lib/testutils/grpcservice/route_guide.proto';
let client = new Client();
client.load([], GRPC_PROTO_PATH);
// a sample DB of points
const DB = [
{
location: { latitude: 407838351, longitude: -746143763 },
name: 'Patriots Path, Mendham, NJ 07945, USA',
},
{
location: { latitude: 408122808, longitude: -743999179 },
name: '101 New Jersey 10, Whippany, NJ 07981, USA',
},
{
location: { latitude: 413628156, longitude: -749015468 },
name: 'U.S. 6, Shohola, PA 18458, USA',
},
{
location: { latitude: 419999544, longitude: -740371136 },
name: '5 Conners Road, Kingston, NY 12401, USA',
},
{
location: { latitude: 414008389, longitude: -743951297 },
name: 'Mid Hudson Psychiatric Center, New Hampton, NY 10958, USA',
},
{
location: { latitude: 419611318, longitude: -746524769 },
name: '287 Flugertown Road, Livingston Manor, NY 12758, USA',
},
{
location: { latitude: 406109563, longitude: -742186778 },
name: '4001 Tremley Point Road, Linden, NJ 07036, USA',
},
{
location: { latitude: 416802456, longitude: -742370183 },
name: '352 South Mountain Road, Wallkill, NY 12589, USA',
},
{
location: { latitude: 412950425, longitude: -741077389 },
name: 'Bailey Turn Road, Harriman, NY 10926, USA',
},
{
location: { latitude: 412144655, longitude: -743949739 },
name: '193-199 Wawayanda Road, Hewitt, NJ 07421, USA',
},
];
// to run this sample, you need to start the grpc server first.
// to start the grpc server, run the following command in k6 repository's root:
// go run -mod=mod examples/grpc_server/*.go
// (golang should be installed)
// the example below is based on the original GRPC client streaming example
//
// It sends several randomly chosen points from the pre-generated
// feature database with a variable delay in between. Prints the
// statistics when they are sent from the server.
// Client-streaming RPC: writes 5 random points from the DB to RecordRoute,
// then half-closes; the server replies with trip statistics on 'data'.
export default () => {
// Connect once per VU, on the first iteration only.
if (__ITER == 0) {
client.connect(GRPC_ADDR, { plaintext: true });
}
const stream = new Stream(client, 'main.RouteGuide/RecordRoute');
// Server's single reply with aggregate statistics for the streamed points.
stream.on('data', (stats) => {
console.log('Finished trip with', stats.pointCount, 'points');
console.log('Passed', stats.featureCount, 'features');
console.log('Travelled', stats.distance, 'meters');
console.log('It took', stats.elapsedTime, 'seconds');
});
stream.on('error', (err) => {
console.log('Stream Error: ' + JSON.stringify(err));
});
// NOTE(review): the client is closed on 'end' but only reconnected when
// __ITER == 0 — confirm later iterations don't invoke a closed client.
stream.on('end', () => {
client.close();
console.log('All done');
})
// send 5 random items
for (var i = 0; i < 5; i++) {
let point = DB[Math.floor(Math.random() * DB.length)];
pointSender(stream, point);
}
// close the client stream
stream.end();
};
// Logs a point's name and coordinates (converted from E7 integers to degrees
// via COORD_FACTOR), writes it onto the client stream, and pauses 0.5s to
// simulate a variable delay between points.
const pointSender = (stream, point) => {
console.log(
'Visiting point ' +
point.name +
' ' +
point.location.latitude / COORD_FACTOR +
', ' +
point.location.longitude / COORD_FACTOR
);
// send the location to the server
stream.write(point.location);
sleep(0.5);
};
import grpc from 'k6/net/grpc';
import { check } from "k6";
// to run this sample, you need to start the grpc server first.
// to start the grpc server, run the following command in k6 repository's root:
// go run -mod=mod examples/grpc_server/*.go
// (golang should be installed)
const GRPC_ADDR = __ENV.GRPC_ADDR || '127.0.0.1:10000';
const GRPC_PROTO_PATH = __ENV.GRPC_PROTO_PATH || '../lib/testutils/grpcservice/route_guide.proto';
let client = new grpc.Client();
client.load([], GRPC_PROTO_PATH);
// Unary RPC: look up the feature at a fixed point and verify the gRPC status.
export default () => {
  client.connect(GRPC_ADDR, { plaintext: true });
  const point = {
    latitude: 410248224,
    longitude: -747127767,
  };
  const reply = client.invoke("main.FeatureExplorer/GetFeature", point);
  check(reply, { "status is OK": (r) => r && r.status === grpc.StatusOK });
  console.log(JSON.stringify(reply.message));
  client.close();
};
import grpc from 'k6/net/grpc';
import {check} from "k6";
// to run this sample, you need to start the grpc server first.
// to start the grpc server, run the following command in k6 repository's root:
// go run -mod=mod examples/grpc_server/*.go
// (golang should be installed)
const GRPC_ADDR = __ENV.GRPC_ADDR || '127.0.0.1:10000';
let client = new grpc.Client();
// Same unary GetFeature call as the proto-based example, but using server
// reflection (`reflect: true`) so no local .proto file is needed.
export default () => {
client.connect(GRPC_ADDR, { plaintext: true, reflect: true });
const response = client.invoke('main.FeatureExplorer/GetFeature', {
latitude: 410248224,
longitude: -747127767,
});
check(response, { 'status is OK': (r) => r && r.status === grpc.StatusOK });
console.log(JSON.stringify(response.message));
client.close();
};
module go.k6.io/k6/examples/grpc_server
go 1.19
replace go.k6.io/k6 => ../../
require (
go.k6.io/k6 v0.0.0-00010101000000-000000000000
google.golang.org/grpc v1.58.3
)
require (
github.com/golang/protobuf v1.5.3 // indirect
golang.org/x/net v0.17.0 // indirect
golang.org/x/sys v0.13.0 // indirect
golang.org/x/text v0.13.0 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20231009173412-8bfb1ae86b6c // indirect
google.golang.org/protobuf v1.31.0 // indirect
)
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM=
golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE=
golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k=
golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/genproto/googleapis/rpc v0.0.0-20231009173412-8bfb1ae86b6c h1:jHkCUWkseRf+W+edG5hMzr/Uh1xkDREY4caybAq4dpY=
google.golang.org/genproto/googleapis/rpc v0.0.0-20231009173412-8bfb1ae86b6c/go.mod h1:4cYg8o5yUbm77w8ZX00LhMVNl/YVBFJRYWDc0uYWMs0=
google.golang.org/grpc v1.58.3 h1:BjnpXut1btbtgN/6sp+brB2Kbm2LjNXnidYujAVbSoQ=
google.golang.org/grpc v1.58.3/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8=
google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
/*
*
* Copyright 2015 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
// Package main implements a simple gRPC server that demonstrates how to use gRPC-Go libraries
// to perform unary, client streaming, server streaming and full duplex RPCs.
//
// It implements the route guide service whose definition can be found in routeguide/route_guide.proto.
package main
import (
"flag"
"fmt"
"log"
"math/rand"
"net"
"time"
"google.golang.org/grpc"
"go.k6.io/k6/lib/testutils/grpcservice"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/testdata"
"google.golang.org/grpc/reflection"
)
// init seeds the global math/rand source so each server run produces a
// different random sequence.
func init() {
rand.Seed(time.Now().UnixNano())
}
var (
tls = flag.Bool("tls", false, "Connection uses TLS if true, else plain TCP")
certFile = flag.String("cert_file", "", "The TLS cert file")
keyFile = flag.String("key_file", "", "The TLS key file")
jsonDBFile = flag.String("json_db_file", "", "A json file containing a list of features")
port = flag.Int("port", 10000, "The server port")
)
// main starts the example RouteGuide/FeatureExplorer gRPC server used by the
// k6 gRPC samples. TLS is optional and controlled by the -tls flag.
func main() {
	flag.Parse()
	log.Printf("gRPC server starting on localhost:%d\n", *port)
	lis, err := net.Listen("tcp", fmt.Sprintf("localhost:%d", *port))
	if err != nil {
		log.Fatalf("failed to listen: %v", err)
	}
	var opts []grpc.ServerOption
	if *tls {
		// Fall back to the grpc-go testdata certificates when none are given.
		if *certFile == "" {
			*certFile = testdata.Path("server1.pem")
		}
		if *keyFile == "" {
			*keyFile = testdata.Path("server1.key")
		}
		creds, err := credentials.NewServerTLSFromFile(*certFile, *keyFile)
		if err != nil {
			log.Fatalf("Failed to generate credentials %v", err)
		}
		opts = []grpc.ServerOption{grpc.Creds(creds)}
	}
	features := grpcservice.LoadFeatures(*jsonDBFile)
	grpcServer := grpc.NewServer(opts...)
	grpcservice.RegisterRouteGuideServer(grpcServer, grpcservice.NewRouteGuideServer(features...))
	grpcservice.RegisterFeatureExplorerServer(grpcServer, grpcservice.NewFeatureExplorerServer(features...))
	// Register server reflection so clients can discover services without
	// the .proto file (used by the k6 reflection example).
	reflection.Register(grpcServer)
	// FIX: Serve's error was silently discarded; it blocks until the listener
	// fails, and that failure should be reported.
	if err := grpcServer.Serve(lis); err != nil {
		log.Fatalf("failed to serve: %v", err)
	}
}
import { Client, Stream } from 'k6/net/grpc';
import { sleep } from 'k6';
const COORD_FACTOR = 1e7;
// to run this sample, you need to start the grpc server first.
// to start the grpc server, run the following command in k6 repository's root:
// go run -mod=mod examples/grpc_server/*.go
// (golang should be installed)
const GRPC_ADDR = __ENV.GRPC_ADDR || '127.0.0.1:10000';
const GRPC_PROTO_PATH = __ENV.GRPC_PROTO_PATH || '../lib/testutils/grpcservice/route_guide.proto';
let client = new Client();
client.load([], GRPC_PROTO_PATH);
// Server-streaming RPC: requests all features inside a bounding rectangle;
// the server streams one 'data' event per feature, then 'end'.
export default () => {
client.connect(GRPC_ADDR, { plaintext: true });
const stream = new Stream(client, 'main.FeatureExplorer/ListFeatures', null);
// One event per feature the server finds in the requested rectangle.
stream.on('data', function (feature) {
console.log(
'Found feature called "' +
feature.name +
'" at ' +
feature.location.latitude / COORD_FACTOR +
', ' +
feature.location.longitude / COORD_FACTOR
);
});
stream.on('end', function () {
// The server has finished sending
client.close();
console.log('All done');
});
stream.on('error', function (e) {
// An error has occurred and the stream has been closed.
console.log('Error: ' + JSON.stringify(e));
});
// send a message to the server: the lo/hi corners of the bounding rectangle
// (coordinates are degrees scaled by COORD_FACTOR, i.e. E7 integers).
stream.write({
lo: {
latitude: 400000000,
longitude: -750000000,
},
hi: {
latitude: 420000000,
longitude: -730000000,
},
});
sleep(0.5);
};
import http from "k6/http";
import { check } from "k6";
// Our form data, to be URL-encoded and POSTed
// Our form data, to be URL-encoded and POSTed
// NOTE(review): "topping" is an array — presumably sent as repeated fields;
// confirm against k6's form-encoding of array values.
const form_data = {
name: "Test Name",
telephone: "123456789",
email: "test@example.com",
comment: "Hello world!",
topping: [
'onion',
'bacon',
'cheese'
]
};
export default function() {
// Passing an object as the data parameter will automatically form-urlencode it
let res = http.post("http://httpbin.org/post", form_data);
// Verify response: httpbin echoes the submitted form under r.json().form
check(res, {
"status is 200": (r) => r.status === 200,
"has correct name": (r) => r.json().form.name === form_data.name,
"has correct telephone number": (r) => r.json().form.telephone === form_data.telephone,
"has correct email": (r) => r.json().form.email === form_data.email,
"has correct comment": (r) => r.json().form.comment === form_data.comment,
"has correct toppings": (r) => JSON.stringify(r.json().form.topping) === JSON.stringify(form_data.topping)
});
}
import http from "k6/http";
import { check } from "k6";
export default function () {
    // Fetch the test API root and verify both the status code and that the
    // connection was negotiated over HTTP/2.
    const res = http.get("https://test-api.k6.io/");
    check(res, {
        "status is 200": (r) => r.status == 200,
        "protocol is HTTP/2": (r) => r.proto == "HTTP/2.0",
    });
}
import encoding from "k6/encoding";
import http from "k6/http";
import { check } from "k6";
export default function() {
// Passing username and password as part of URL will authenticate using HTTP Basic Auth
let res = http.get("http://user:passwd@httpbin.org/basic-auth/user/passwd");
// Verify response: httpbin reports the authenticated user back in JSON
check(res, {
"status is 200": (r) => r.status === 200,
"is authenticated": (r) => r.json().authenticated === true,
"is correct user": (r) => r.json().user === "user"
});
// Alternatively you can create the header yourself to authenticate using HTTP Basic Auth
// (base64 of "user:passwd" in the Authorization header)
res = http.get("http://httpbin.org/basic-auth/user/passwd", { headers: { "Authorization": "Basic " + encoding.b64encode("user:passwd") }});
// Verify response
check(res, {
"status is 200": (r) => r.status === 200,
"is authenticated": (r) => r.json().authenticated === true,
"is correct user": (r) => r.json().user === "user"
});
}
import { check } from 'k6';
import http from 'k6/http';
export default function() {
    // Issue both requests concurrently within the same VU iteration.
    const responses = http.batch([
        "http://test.k6.io",
        "http://test.k6.io/pi.php"
    ]);
    const [mainPage, piPage] = responses;
    check(mainPage, {
        "main page 200": res => res.status === 200,
    });
    check(piPage, {
        "pi page 200": res => res.status === 200,
        "pi page has right content": res => res.body === "3.14",
    });
};
import http from "k6/http";
import { check } from "k6";
export default function() {
// Passing username and password as part of URL plus the auth option will authenticate using HTTP Digest authentication
// (k6 performs the challenge/response round trip internally)
let res = http.get("http://user:passwd@httpbin.org/digest-auth/auth/user/passwd", {auth: "digest"});
// Verify response
check(res, {
"status is 200": (r) => r.status === 200,
"is authenticated": (r) => r.json().authenticated === true,
"is correct user": (r) => r.json().user === "user"
});
}
import http from 'k6/http';
export default function () {
// Fire-and-forget GET; request metrics are still recorded.
http.get('https://test-api.k6.io/');
};
import http from "k6/http";
import { check, group } from "k6";
/*
* k6 supports all standard HTTP verbs/methods:
* CONNECT, DELETE, GET, HEAD, OPTIONS, PATCH, POST, PUT and TRACE.
*
* Below are examples showing how to use the most common of these.
*/
export default function() {
// Each group issues one request with the named verb against httpbin and
// verifies the verb marker round-trips (query args for GET/DELETE,
// form/JSON body for POST/PUT/PATCH).
// GET request
group("GET", function() {
let res = http.get("http://httpbin.org/get?verb=get");
check(res, {
"status is 200": (r) => r.status === 200,
"is verb correct": (r) => r.json().args.verb === "get",
});
});
// POST request (object body is form-urlencoded automatically)
group("POST", function() {
let res = http.post("http://httpbin.org/post", { verb: "post" });
check(res, {
"status is 200": (r) => r.status === 200,
"is verb correct": (r) => r.json().form.verb === "post",
});
});
// PUT request (explicit JSON body and Content-Type)
group("PUT", function() {
let res = http.put("http://httpbin.org/put", JSON.stringify({ verb: "put" }), { headers: { "Content-Type": "application/json" }});
check(res, {
"status is 200": (r) => r.status === 200,
"is verb correct": (r) => r.json().json.verb === "put",
});
});
// PATCH request
group("PATCH", function() {
let res = http.patch("http://httpbin.org/patch", JSON.stringify({ verb: "patch" }), { headers: { "Content-Type": "application/json" }});
check(res, {
"status is 200": (r) => r.status === 200,
"is verb correct": (r) => r.json().json.verb === "patch",
});
});
// DELETE request (http.del, since `delete` is a reserved word)
group("DELETE", function() {
let res = http.del("http://httpbin.org/delete?verb=delete");
check(res, {
"status is 200": (r) => r.status === 200,
"is verb correct": (r) => r.json().args.verb === "delete",
});
});
}
import http from "k6/http";
import { check } from "k6";
export default function() {
// Send a JSON encoded POST request
let body = JSON.stringify({ key: "value" });
let res = http.post("http://httpbin.org/post", body, { headers: { "Content-Type": "application/json" }});
// Use JSON.parse to deserialize the JSON (instead of using the r.json() method)
let j = JSON.parse(res.body);
// Verify response: httpbin echoes a parsed JSON body under .json
check(res, {
"status is 200": (r) => r.status === 200,
"is key correct": (r) => j.json.key === "value",
});
}
import crypto from "k6/crypto";
import encoding from "k6/encoding";
import {sleep} from "k6";
// Maps JWT "alg" header values to the hash names k6's crypto module expects.
const algToHash = {
HS256: "sha256",
HS384: "sha384",
HS512: "sha512"
};
// Returns the unpadded base64url HMAC signature of `data`.
// `hashAlg` is a hash name like "sha256" (a value of algToHash).
function sign(data, hashAlg, secret) {
let hasher = crypto.createHMAC(hashAlg, secret);
hasher.update(data);
// Some manual base64 rawurl encoding as `Hasher.digest(encodingType)`
// doesn't support that encoding type yet.
return hasher.digest("base64").replace(/\//g, "_").replace(/\+/g, "-").replace(/=/g, "");
}
// Builds a signed compact JWT (header.payload.signature) from `payload`.
// `algorithm` is a JWT name ("HS256", "HS384", "HS512"); defaults to HS256.
function encode(payload, secret, algorithm) {
algorithm = algorithm || "HS256";
let header = encoding.b64encode(JSON.stringify({ typ: "JWT", alg: algorithm }), "rawurl");
payload = encoding.b64encode(JSON.stringify(payload), "rawurl", "s");
let sig = sign(header + "." + payload, algToHash[algorithm], secret);
return [header, payload, sig].join(".");
}
// Verifies a compact JWT's HMAC signature and returns the decoded payload.
// `algorithm` is an optional JWT name ("HS256", ...) as in encode(); when
// omitted, the algorithm declared in the token's header is used.
// Throws if the signature does not match.
function decode(token, secret, algorithm) {
    // Split the compact JWT into header.payload.signature segments.
    let parts = token.split('.');
    let header = JSON.parse(encoding.b64decode(parts[0], "rawurl", "s"));
    let payload = JSON.parse(encoding.b64decode(parts[1], "rawurl", "s"));
    // Default to the algorithm declared in the token header.
    algorithm = algorithm || header.alg;
    // FIX: map the JWT alg name to the hash name exactly as encode() does.
    // Previously a caller-supplied JWT name ("HS256") was handed to sign()
    // unmapped, so verification always failed when the third argument was
    // given. Falling back to `algorithm` itself keeps raw hash names working.
    let hashAlg = algToHash[algorithm] || algorithm;
    if (sign(parts[0] + "." + parts[1], hashAlg, secret) != parts[2]) {
        throw Error("JWT signature verification failed");
    }
    return payload;
}
export default function() {
// Round-trip a payload through encode() and decode() with the default HS256.
let message = { key2: "value2" };
let token = encode(message, "secret");
console.log("encoded", token);
let payload = decode(token, "secret");
console.log("decoded", JSON.stringify(payload));
// Pace the iteration.
sleep(1)
}
import { group, check } from "k6";
import http from "k6/http";
// Thresholds keyed by the `kind` tag attached to each request below; the
// test fails if any aggregate crosses its bound.
export let options = {
thresholds: {
'http_req_duration{kind:html}': ["avg<=10"],
'http_req_duration{kind:css}': ["avg<=10"],
'http_req_duration{kind:img}': ["avg<=100"],
'http_reqs': ["rate>100"],
}
};
export default function() {
// Each group tags its request so the per-kind thresholds above apply.
group("front page", function() {
check(http.get("http://localhost:8080/", {
tags: {'kind': 'html' },
}), {
"status is 200": (res) => res.status === 200,
});
});
group("stylesheet", function() {
check(http.get("http://localhost:8080/style.css", {
tags: {'kind': 'css' },
}), {
"status is 200": (res) => res.status === 200,
});
});
group("image", function() {
check(http.get("http://localhost:8080/teddy.jpg", {
tags: {'kind': 'img' },
}), {
"status is 200": (res) => res.status === 200,
});
});
}
export default function() {
    // Exercise every console level with identical extra arguments so the
    // per-level output formatting can be compared side by side.
    for (const level of ["log", "debug", "info", "warn", "error"]) {
        console[level](level, "a", "b");
    }
}
export default function() {
    // Busy-spin forever — exercises k6's handling of a VU that never returns.
    for (;;) {
        // do nothing, forever!!
    }
}
import http from "k6/http";
import { check } from "k6";
export default function() {
let res = http.get("https://stackoverflow.com");
check(res, {
// r.ocsp exposes the stapled OCSP response; GOOD means the server
// certificate is not revoked.
"is OCSP response good": (r) => r.ocsp.status === http.OCSP_STATUS_GOOD
});
};
This diff is collapsed.
This diff is collapsed.
import http from "k6/http";
import {check} from "k6";
export let options = {
// Max redirects to follow (default is 10)
maxRedirects: 5
};
export default function() {
// If redirecting more than options.maxRedirects times, the last response will be returned
// (here: the 6th, unfollowed 302)
let res = http.get("https://httpbin.org/redirect/6");
check(res, {
"is status 302": (r) => r.status === 302
});
// The number of redirects to follow can be controlled on a per-request level as well
res = http.get("https://httpbin.org/redirect/1", {redirects: 1});
console.log(res.status);
check(res, {
"is status 200": (r) => r.status === 200,
"url is correct": (r) => r.url === "https://httpbin.org/get"
});
}
// Helpers that throw string values from fixed lines — presumably used to
// exercise k6's stack-trace reporting for imported modules (TODO confirm).
export function f1() {
throw "line 2";
}
export function f2() {
throw "line 6";
}
export function f3() {
throw "line 10";
}
import { f2 } from "./imported.js"
export default function() {
// Always throws; f2 (as defined in imported.js) throws the string "line 6".
f2();
}
var o={d:(e,r)=>{for(var t in r)o.o(r,t)&&!o.o(e,t)&&Object.defineProperty(e,t,{enumerable:!0,get:r[t]})},o:(o,e)=>Object.prototype.hasOwnProperty.call(o,e)},e={};o.d(e,{Z:()=>r});const r=()=>{!function(o){throw"cool is cool"}()};var t=e.Z;export{t as default};
//# sourceMappingURL=data:application/json;charset=utf-8;base64,eyJ2ZXJzaW9uIjozLCJzb3VyY2VzIjpbIndlYnBhY2s6Ly8vd2VicGFjay9ib290c3RyYXAiLCJ3ZWJwYWNrOi8vL3dlYnBhY2svcnVudGltZS9kZWZpbmUgcHJvcGVydHkgZ2V0dGVycyIsIndlYnBhY2s6Ly8vd2VicGFjay9ydW50aW1lL2hhc093blByb3BlcnR5IHNob3J0aGFuZCIsIndlYnBhY2s6Ly8vLi90ZXN0MS50cyJdLCJuYW1lcyI6WyJfX3dlYnBhY2tfcmVxdWlyZV9fIiwiZXhwb3J0cyIsImRlZmluaXRpb24iLCJrZXkiLCJvIiwiT2JqZWN0IiwiZGVmaW5lUHJvcGVydHkiLCJlbnVtZXJhYmxlIiwiZ2V0Iiwib2JqIiwicHJvcCIsInByb3RvdHlwZSIsImhhc093blByb3BlcnR5IiwiY2FsbCIsInMiLCJjb29sVGhyb3ciXSwibWFwcGluZ3MiOiJBQUNBLElBQUlBLEVBQXNCLENDQTFCLEVBQXdCLENBQUNDLEVBQVNDLEtBQ2pDLElBQUksSUFBSUMsS0FBT0QsRUFDWEYsRUFBb0JJLEVBQUVGLEVBQVlDLEtBQVNILEVBQW9CSSxFQUFFSCxFQUFTRSxJQUM1RUUsT0FBT0MsZUFBZUwsRUFBU0UsRUFBSyxDQUFFSSxZQUFZLEVBQU1DLElBQUtOLEVBQVdDLE1DSjNFLEVBQXdCLENBQUNNLEVBQUtDLElBQVVMLE9BQU9NLFVBQVVDLGVBQWVDLEtBQUtKLEVBQUtDLEksc0JDR2xGLGNBSEEsU0FBbUJJLEdBQ2YsS0FBTSxlQUdOQyxJIiwiZmlsZSI6InRlc3QxLmpzIiwic291cmNlc0NvbnRlbnQiOlsiLy8gVGhlIHJlcXVpcmUgc2NvcGVcbnZhciBfX3dlYnBhY2tfcmVxdWlyZV9fID0ge307XG5cbiIsIi8vIGRlZmluZSBnZXR0ZXIgZnVuY3Rpb25zIGZvciBoYXJtb255IGV4cG9ydHNcbl9fd2VicGFja19yZXF1aXJlX18uZCA9IChleHBvcnRzLCBkZWZpbml0aW9uKSA9PiB7XG5cdGZvcih2YXIga2V5IGluIGRlZmluaXRpb24pIHtcblx0XHRpZihfX3dlYnBhY2tfcmVxdWlyZV9fLm8oZGVmaW5pdGlvbiwga2V5KSAmJiAhX193ZWJwYWNrX3JlcXVpcmVfXy5vKGV4cG9ydHMsIGtleSkpIHtcblx0XHRcdE9iamVjdC5kZWZpbmVQcm9wZXJ0eShleHBvcnRzLCBrZXksIHsgZW51bWVyYWJsZTogdHJ1ZSwgZ2V0OiBkZWZpbml0aW9uW2tleV0gfSk7XG5cdFx0fVxuXHR9XG59OyIsIl9fd2VicGFja19yZXF1aXJlX18ubyA9IChvYmosIHByb3ApID0+IChPYmplY3QucHJvdG90eXBlLmhhc093blByb3BlcnR5LmNhbGwob2JqLCBwcm9wKSkiLCJmdW5jdGlvbiBjb29sVGhyb3coczogc3RyaW5nKSB7XG4gICAgdGhyb3cgXCJjb29sIFwiKyBzXG59XG5leHBvcnQgZGVmYXVsdCAoKSA9PiB7XG4gICAgY29vbFRocm93KFwiaXMgY29vbFwiKVxufTtcbiJdLCJzb3VyY2VSb290IjoiIn0=
\ No newline at end of file
// Intentionally throws a plain string (not an Error) — exercises non-Error
// throw handling.
function coolThrow(s: string) {
throw "cool "+ s
}
export default () => {
coolThrow("is cool")
};
import http from "k6/http";
import { check } from "k6";
/*
* Stages (aka ramping) is how you, in code, specify the ramping of VUs.
* That is, how many VUs should be active and generating traffic against
* the target system at any specific point in time for the duration of
* the test.
*
* The following stages configuration will result in up-flat-down ramping
* profile over a 20s total test duration.
*/
export let options = {
stages: [
// Ramp-up from 0 to 1000 VUs in 10s
// (comments previously said "1 to 5 VUs", contradicting the targets)
{ duration: "10s", target: 1000 },
// Stay flat at 1000 VUs for 5s
{ duration: "5s", target: 1000 },
// Ramp-down from 1000 to 0 VUs over 5s
{ duration: "5s", target: 0 }
]
};
export default function() {
// NOTE(review): hard-coded LAN address — point this at your own target host.
let res = http.get("http://192.168.1.220:8000/");
check(res, { "status is 200": (r) => r.status === 200 });
}
import http from "k6/http";
import { check } from "k6";
/*
* Stages (aka ramping) is how you, in code, specify the ramping of VUs.
* That is, how many VUs should be active and generating traffic against
* the target system at any specific point in time for the duration of
* the test.
*
* The following stages configuration will result in up-flat-down ramping
* profile over a 20s total test duration.
*/
export let options = {
stages: [
// Ramp-up from 1 to 5 VUs in 10s
{ duration: "10s", target: 5 },
// Stay at rest on 5 VUs for 5s
{ duration: "5s", target: 5 },
// Ramp-down from 5 to 0 VUs for 5s
{ duration: "5s", target: 0 }
]
};
export default function() {
// NOTE(review): hard-coded LAN address — point this at your own target host.
let res = http.get("http://192.168.1.220:8000/");
check(res, { "status is 200": (r) => r.status === 200 });
}
import http from "k6/http";
import { Trend } from "k6/metrics";
import { check } from "k6";
/*
* Checks, custom metrics and requests can be tagged with any number of tags.
*
* Tags can be used for:
* - Creating metric thresholds by filtering the metric data stream based on tags
* - Aid result analysis by allowing for more precise filtering of metrics
*/
// Custom Trend metric, shared across iterations of this VU.
let myTrend = new Trend("my_trend");
export default function() {
// Add tag to request metric data
let res = http.get("http://httpbin.org/", { tags: { my_tag: "I'm a tag" } });
// Add tag to check
check(res, { "status is 200": (r) => r.status === 200 }, { my_tag: "I'm a tag" });
// Add tag to custom metric (records the TCP connect time of this request)
myTrend.add(res.timings.connecting, { my_tag: "I'm a tag" });
}
import http from "k6/http";
import { check } from "k6";
/*
* Thresholds are used to specify where a metric crosses into unacceptable
* territory. If a threshold is crossed the test is considered a failure
* and is marked as such by the program through a non-zero exit code.
*
* Thresholds are specified as part of the options structure. It's a set of
* key/value pairs where the name specifies the metric to watch (with optional
* tag filtering) and the values are JS expressions. Which could be a simple
* number or involve a statistical aggregate like avg, max, percentiles etc.
*/
export let options = {
thresholds: {
// Declare a threshold over all HTTP response times,
// the 95th percentile should not cross 500ms
http_req_duration: ["p(95)<500"],
// Declare a threshold over HTTP response times for all data points
// where the URL tag is equal to "http://httpbin.org/post",
// the max should not cross 1000ms
"http_req_duration{name:http://httpbin.org/post}": ["max<1000"],
}
};
export default function() {
// One GET (only global threshold) and one POST (also matches the name-tag
// filtered threshold above).
http.get("http://httpbin.org/");
http.post("http://httpbin.org/post", {data: "some data"});
}
import http from "k6/http";
import { check, group, sleep } from "k6";
import { Rate } from "k6/metrics";
// A custom metric to track failure rates
// A custom metric to track failure rates
var failureRate = new Rate("check_failure_rate");
// Options
export let options = {
stages: [
// Linearly ramp up from 1 to 50 VUs during first minute
{ target: 50, duration: "1m" },
// Hold at 50 VUs for the next 3 minutes and 30 seconds
{ target: 50, duration: "3m30s" },
// Linearly ramp down from 50 to 0 VUs over the last 30 seconds
{ target: 0, duration: "30s" }
// Total execution time will be ~5 minutes
],
thresholds: {
// We want the 95th percentile of all HTTP request durations to be less than 500ms
"http_req_duration": ["p(95)<500"],
// Requests with the staticAsset tag should finish even faster
"http_req_duration{staticAsset:yes}": ["p(99)<250"],
// Thresholds based on the custom metric we defined and use to track application failures
"check_failure_rate": [
// Global failure rate should be less than 1%
"rate<0.01",
// Abort the test early if it climbs over 5%
{ threshold: "rate<=0.05", abortOnFail: true },
],
},
};
// Main function
export default function () {
let response = http.get("https://test.k6.io/");
// check() returns false if any of the specified conditions fail
let checkRes = check(response, {
"http2 is used": (r) => r.proto === "HTTP/2.0",
"status is 200": (r) => r.status === 200,
"content is present": (r) => r.body.indexOf("Collection of simple web-pages suitable for load testing.") !== -1,
});
// We reverse the check() result since we want to count the failures
failureRate.add(!checkRes);
// Load static assets, all requests
group("Static Assets", function () {
// Execute multiple requests in parallel like a browser, to fetch some static resources
let resps = http.batch([
["GET", "https://test.k6.io/static/css/site.css", null, { tags: { staticAsset: "yes" } }],
["GET", "https://test.k6.io/static/favicon.ico", null, { tags: { staticAsset: "yes" } }],
["GET", "https://test.k6.io/static/js/prisms.js", null, { tags: { staticAsset: "yes" } }],
]);
// Combine check() call with failure tracking
// NOTE(review): only resps[0] and resps[1] are verified; resps[2]
// (prisms.js) is fetched but never checked — confirm if intentional.
failureRate.add(!check(resps, {
"status is 200": (r) => r[0].status === 200 && r[1].status === 200,
"reused connection": (r) => r[0].timings.connecting == 0,
}));
});
sleep(Math.random() * 3 + 2); // Random sleep between 2s and 5s
}
import http from "k6/http";
import { check } from "k6";
export let options = {
// When this option is enabled (set to true), all of the verifications
// that would otherwise be done to establish trust in a server provided
// TLS certificate will be ignored.
insecureSkipTLSVerify: true
};
export default function() {
let res = http.get("https://httpbin.org/");
check(res, { "status is 200": (r) => r.status === 200 });
}
import http from "k6/http";
import { check } from "k6";
// Restrict the TLS handshake to two cipher suites and the TLS 1.0–1.2 range.
export let options = {
tlsCipherSuites: [
"TLS_RSA_WITH_RC4_128_SHA",
"TLS_RSA_WITH_AES_128_GCM_SHA256",
],
tlsVersion: {
min: "tls1.0",
max: "tls1.2"
}
};
export default function() {
let res = http.get("https://sha256.badssl.com");
check(res, {
// The response exposes the negotiated protocol version and cipher suite.
"is TLSv1.2": (r) => r.tls_version === http.TLS_1_2,
"is sha256 cipher suite": (r) => r.tls_cipher_suite === "TLS_RSA_WITH_AES_128_GCM_SHA256"
});
};
import ws from "k6/ws";
import { check } from "k6";
export default function () {
var url = "ws://echo.websocket.org";
var params = { "tags": { "my_tag": "hello" } };
// ws.connect blocks until the socket is closed; all interaction happens
// inside this callback via event handlers and timers.
var response = ws.connect(url, params, function (socket) {
socket.on('open', function open() {
console.log('connected');
socket.send(Date.now());
socket.setInterval(function timeout() {
socket.ping();
console.log("Pinging every 1sec (setInterval test)");
}, 1000);
});
socket.on('ping', function () {
console.log("PING!");
});
socket.on('pong', function () {
console.log("PONG!");
});
socket.on('pong', function () {
// Multiple event handlers on the same event
console.log("OTHER PONG!");
});
socket.on('message', function incoming(data) {
// The echo server returns the Date.now() timestamp we sent.
console.log(`Roundtrip time: ${Date.now() - data} ms`);
socket.setTimeout(function timeout() {
socket.send(Date.now());
}, 500);
});
socket.on('close', function close() {
console.log('disconnected');
});
socket.on('error', function (e) {
// "close sent" is the expected error when we close the socket ourselves.
if (e.error() != "websocket: close sent") {
console.log('An unexpected error occurred: ', e.error());
}
});
socket.setTimeout(function () {
console.log('2 seconds passed, closing the socket');
socket.close();
}, 2000);
});
// 101 Switching Protocols = the websocket handshake succeeded.
check(response, { "status is 101": (r) => r && r.status === 101 });
};
/*
This is a k6 test script that imports the xk6-kafka and
tests Kafka by sending 100 Avro messages per iteration
without any associated key.
*/
import { check } from "k6";
import {
Writer,
Reader,
Connection,
SchemaRegistry,
SCHEMA_TYPE_AVRO,
} from "k6/x/kafka"; // import kafka extension
const brokers = ["localhost:9092"];
const topic = "xk6_kafka_avro_topic";
const writer = new Writer({
brokers: brokers,
topic: topic,
autoCreateTopic: true,
});
const reader = new Reader({
brokers: brokers,
topic: topic,
});
const connection = new Connection({
address: brokers[0],
});
const schemaRegistry = new SchemaRegistry();
if (__VU == 0) {
connection.createTopic({ topic: topic });
}
const valueSchema = JSON.stringify({
type: "record",
name: "Value",
namespace: "dev.mostafa.xk6.kafka",
fields: [
{
name: "name",
type: "string",
},
],
});
export default function () {
// Produce 100 messages (one per produce() call), values Avro-serialized
// locally (no registry URL configured).
for (let index = 0; index < 100; index++) {
let messages = [
{
value: schemaRegistry.serialize({
data: {
name: "xk6-kafka",
},
schema: { schema: valueSchema },
schemaType: SCHEMA_TYPE_AVRO,
}),
},
];
writer.produce({ messages: messages });
}
// Read 10 messages only
let messages = reader.consume({ limit: 10 });
check(messages, {
"10 messages returned": (msgs) => msgs.length == 10,
"value is correct": (msgs) =>
schemaRegistry.deserialize({
data: msgs[0].value,
schema: { schema: valueSchema },
schemaType: SCHEMA_TYPE_AVRO,
}).name == "xk6-kafka",
});
}
export function teardown(data) {
if (__VU == 0) {
// Delete the topic
connection.deleteTopic(topic);
}
writer.close();
reader.close();
connection.close();
}
/*
This is a k6 test script that imports the xk6-kafka and
tests Kafka with 100 Avro messages per iteration.
*/
import { check } from "k6";
import {
Writer,
Reader,
Connection,
SchemaRegistry,
KEY,
VALUE,
TOPIC_NAME_STRATEGY,
RECORD_NAME_STRATEGY,
SCHEMA_TYPE_AVRO,
} from "k6/x/kafka"; // import kafka extension
const brokers = ["localhost:9092"];
const topic = "com.example.person";
const writer = new Writer({
brokers: brokers,
topic: topic,
autoCreateTopic: true,
});
const reader = new Reader({
brokers: brokers,
topic: topic,
});
const connection = new Connection({
address: brokers[0],
});
const schemaRegistry = new SchemaRegistry({
url: "http://localhost:8081",
});
if (__VU == 0) {
connection.createTopic({ topic: topic });
}
const keySchema = `{
"name": "KeySchema",
"type": "record",
"namespace": "com.example.key",
"fields": [
{
"name": "ssn",
"type": "string"
}
]
}
`;
const valueSchema = `{
"name": "ValueSchema",
"type": "record",
"namespace": "com.example.value",
"fields": [
{
"name": "firstName",
"type": "string"
},
{
"name": "lastName",
"type": "string"
}
]
}`;
const keySubjectName = schemaRegistry.getSubjectName({
topic: topic,
element: KEY,
subjectNameStrategy: TOPIC_NAME_STRATEGY,
schema: keySchema,
});
const valueSubjectName = schemaRegistry.getSubjectName({
topic: topic,
element: VALUE,
subjectNameStrategy: RECORD_NAME_STRATEGY,
schema: valueSchema,
});
const keySchemaObject = schemaRegistry.createSchema({
subject: keySubjectName,
schema: keySchema,
schemaType: SCHEMA_TYPE_AVRO,
});
const valueSchemaObject = schemaRegistry.createSchema({
subject: valueSubjectName,
schema: valueSchema,
schemaType: SCHEMA_TYPE_AVRO,
});
export default function () {
// Produce 100 messages with both key and value Avro-serialized against the
// schemas registered with the schema registry in the init context.
for (let index = 0; index < 100; index++) {
let messages = [
{
key: schemaRegistry.serialize({
data: {
ssn: "ssn-" + index,
},
schema: keySchemaObject,
schemaType: SCHEMA_TYPE_AVRO,
}),
value: schemaRegistry.serialize({
data: {
firstName: "firstName-" + index,
lastName: "lastName-" + index,
},
schema: valueSchemaObject,
schemaType: SCHEMA_TYPE_AVRO,
}),
},
];
writer.produce({ messages: messages });
}
// Consume and verify a sample of what was produced.
let messages = reader.consume({ limit: 20 });
check(messages, {
"20 message returned": (msgs) => msgs.length == 20,
"key starts with 'ssn-' string": (msgs) =>
schemaRegistry
.deserialize({
data: msgs[0].key,
schema: keySchemaObject,
schemaType: SCHEMA_TYPE_AVRO,
})
.ssn.startsWith("ssn-"),
"value contains 'firstName-' and 'lastName-' strings": (msgs) =>
schemaRegistry
.deserialize({
data: msgs[0].value,
schema: valueSchemaObject,
schemaType: SCHEMA_TYPE_AVRO,
})
.firstName.startsWith("firstName-") &&
schemaRegistry
.deserialize({
data: msgs[0].value,
schema: valueSchemaObject,
schemaType: SCHEMA_TYPE_AVRO,
})
.lastName.startsWith("lastName-"),
});
}
export function teardown(data) {
if (__VU == 0) {
// Delete the topic
connection.deleteTopic(topic);
}
writer.close();
reader.close();
connection.close();
}
/*
This is a k6 test script that imports the xk6-kafka and
tests Kafka with 200 byte array messages per iteration.
*/
import { check } from "k6";
import {
Writer,
Reader,
Connection,
SchemaRegistry,
SCHEMA_TYPE_BYTES,
} from "k6/x/kafka"; // import kafka extension
const brokers = ["localhost:9092"];
const topic = "xk6_kafka_byte_array_topic";
const writer = new Writer({
brokers: brokers,
topic: topic,
autoCreateTopic: true,
});
const reader = new Reader({
brokers: brokers,
topic: topic,
});
const connection = new Connection({
address: brokers[0],
});
const schemaRegistry = new SchemaRegistry();
if (__VU == 0) {
connection.createTopic({ topic: topic });
}
const payload = "byte array payload";
export default function () {
// Produce 200 messages (100 iterations x 2 messages); keys and values are
// plain byte arrays built from the strings below.
for (let index = 0; index < 100; index++) {
let messages = [
{
// The data type of the key is a string
key: schemaRegistry.serialize({
data: Array.from("test-id-abc-" + index, (x) => x.charCodeAt(0)),
schemaType: SCHEMA_TYPE_BYTES,
}),
// The data type of the value is a byte array
value: schemaRegistry.serialize({
data: Array.from(payload, (x) => x.charCodeAt(0)),
schemaType: SCHEMA_TYPE_BYTES,
}),
},
{
key: schemaRegistry.serialize({
data: Array.from("test-id-def-" + index, (x) => x.charCodeAt(0)),
schemaType: SCHEMA_TYPE_BYTES,
}),
value: schemaRegistry.serialize({
data: Array.from(payload, (x) => x.charCodeAt(0)),
schemaType: SCHEMA_TYPE_BYTES,
}),
},
];
writer.produce({
messages: messages,
});
}
// Read 10 messages only
let messages = reader.consume({ limit: 10 });
check(messages, {
"10 messages returned": (msgs) => msgs.length == 10,
// Deserialized bytes are converted back to strings for comparison.
"key starts with 'test-id-' string": (msgs) =>
String.fromCharCode(
...schemaRegistry.deserialize({
data: msgs[0].key,
schemaType: SCHEMA_TYPE_BYTES,
}),
).startsWith("test-id-"),
"value is correct": (msgs) =>
String.fromCharCode(
...schemaRegistry.deserialize({
data: msgs[0].value,
schemaType: SCHEMA_TYPE_BYTES,
}),
) == payload,
});
}
export function teardown(data) {
if (__VU == 0) {
// Delete the topic
connection.deleteTopic(topic);
}
writer.close();
reader.close();
connection.close();
}
/*
This is a k6 test script that imports the xk6-kafka and
tests Kafka with 300 JSON messages per iteration. It
*/
import { check } from "k6";
// import * as kafka from "k6/x/kafka";
import {
Writer,
Reader,
Connection,
SchemaRegistry,
CODEC_SNAPPY,
SCHEMA_TYPE_JSON,
} from "k6/x/kafka"; // import kafka extension
// Prints module-level constants
// console.log(kafka);
const brokers = ["localhost:9092"];
const topic = "xk6_kafka_consumer_group_topic";
const groupID = "my-group";
const writer = new Writer({
brokers: brokers,
topic: topic,
compression: CODEC_SNAPPY,
});
const reader = new Reader({
brokers: brokers,
groupID: groupID,
groupTopics: [topic],
});
const connection = new Connection({
address: brokers[0],
});
const schemaRegistry = new SchemaRegistry();
if (__VU == 0) {
connection.createTopic({
topic: topic,
numPartitions: 3,
replicationFactor: 1,
configEntries: [
{
configName: "compression.type",
configValue: CODEC_SNAPPY,
},
],
});
}
export const options = {
thresholds: {
// Base thresholds to see if the writer or reader is working
kafka_writer_error_count: ["count == 0"],
kafka_reader_error_count: ["count == 0"],
},
};
// Produces 300 JSON messages (100 iterations x 3 partitions), then consumes
// 10 of them through the consumer group and verifies topic, key and value.
export default function () {
    let messages = [];
    for (let i = 0; i < 100; i++) {
        for (let partition = 0; partition < 3; partition++) {
            messages.push({
                // The data type of the key is JSON
                key: schemaRegistry.serialize({
                    data: {
                        key: "value",
                    },
                    schemaType: SCHEMA_TYPE_JSON,
                }),
                // The data type of the value is JSON
                value: schemaRegistry.serialize({
                    data: {
                        key: "value",
                    },
                    schemaType: SCHEMA_TYPE_JSON,
                }),
                // FIX: this field was misspelled "parition", so the explicit
                // partition assignment was silently ignored by the writer.
                partition: partition,
            });
        }
    }
    writer.produce({ messages: messages });
    // Read one message only
    messages = reader.consume({ limit: 10 });
    check(messages, {
        "10 messages is received": (messages) => messages.length == 10,
    });
    check(messages[0], {
        "Topic equals to xk6_kafka_consumer_group_topic": (msg) =>
            msg["topic"] == topic,
        "Key contains key/value and is JSON": (msg) =>
            schemaRegistry.deserialize({
                data: msg.key,
                schemaType: SCHEMA_TYPE_JSON,
            }).key == "value",
        "Value contains key/value and is JSON": (msg) =>
            typeof schemaRegistry.deserialize({
                data: msg.value,
                schemaType: SCHEMA_TYPE_JSON,
            }) == "object" &&
            schemaRegistry.deserialize({
                data: msg.value,
                schemaType: SCHEMA_TYPE_JSON,
            }).key == "value",
    });
}
export function teardown(data) {
// Release producer, consumer and the admin connection when the test ends.
writer.close();
reader.close();
connection.close();
}
/*
This is a k6 test script that imports the xk6-kafka and
tests Kafka with 200 JSON messages per iteration.
*/
import { check } from "k6";
// import * as kafka from "k6/x/kafka";
import {
Writer,
Reader,
Connection,
SchemaRegistry,
CODEC_SNAPPY,
SCHEMA_TYPE_JSON,
} from "k6/x/kafka"; // import kafka extension
// Prints module-level constants
// console.log(kafka);
const brokers = ["localhost:9092"];
const topic = "xk6_kafka_json_topic";
const writer = new Writer({
brokers: brokers,
topic: topic,
autoCreateTopic: true,
compression: CODEC_SNAPPY,
});
const reader = new Reader({
brokers: brokers,
topic: topic,
});
const connection = new Connection({
address: brokers[0],
});
const schemaRegistry = new SchemaRegistry();
if (__VU == 0) {
connection.createTopic({
topic: topic,
configEntries: [
{
configName: "compression.type",
configValue: CODEC_SNAPPY,
},
],
});
}
export const options = {
thresholds: {
// Base thresholds to see if the writer or reader is working
kafka_writer_error_count: ["count == 0"],
kafka_reader_error_count: ["count == 0"],
},
};
export default function () {
// Produce 200 messages (100 iterations x 2 messages) with JSON keys/values,
// custom headers, and (on the first message) explicit offset/partition/time.
// NOTE(review): offset is normally assigned by the broker on produce —
// confirm whether xk6-kafka honors or ignores the field here.
for (let index = 0; index < 100; index++) {
let messages = [
{
// The data type of the key is JSON
key: schemaRegistry.serialize({
data: {
correlationId: "test-id-abc-" + index,
},
schemaType: SCHEMA_TYPE_JSON,
}),
// The data type of the value is JSON
value: schemaRegistry.serialize({
data: {
name: "xk6-kafka",
version: "0.9.0",
author: "Mostafa Moradian",
description:
"k6 extension to load test Apache Kafka with support for Avro messages",
index: index,
},
schemaType: SCHEMA_TYPE_JSON,
}),
headers: {
mykey: "myvalue",
},
offset: index,
partition: 0,
time: new Date(), // Will be converted to timestamp automatically
},
{
key: schemaRegistry.serialize({
data: {
correlationId: "test-id-def-" + index,
},
schemaType: SCHEMA_TYPE_JSON,
}),
value: schemaRegistry.serialize({
data: {
name: "xk6-kafka",
version: "0.9.0",
author: "Mostafa Moradian",
description:
"k6 extension to load test Apache Kafka with support for Avro messages",
index: index,
},
schemaType: SCHEMA_TYPE_JSON,
}),
headers: {
mykey: "myvalue",
},
},
];
writer.produce({ messages: messages });
}
// Read 10 messages only
let messages = reader.consume({ limit: 10 });
check(messages, {
"10 messages are received": (messages) => messages.length == 10,
});
check(messages[0], {
"Topic equals to xk6_kafka_json_topic": (msg) => msg["topic"] == topic,
"Key contains key/value and is JSON": (msg) =>
schemaRegistry
.deserialize({ data: msg.key, schemaType: SCHEMA_TYPE_JSON })
.correlationId.startsWith("test-id-"),
"Value contains key/value and is JSON": (msg) =>
typeof schemaRegistry.deserialize({
data: msg.value,
schemaType: SCHEMA_TYPE_JSON,
}) == "object" &&
schemaRegistry.deserialize({
data: msg.value,
schemaType: SCHEMA_TYPE_JSON,
}).name == "xk6-kafka",
// Header values come back as byte arrays, hence the fromCharCode round-trip.
"Header equals {'mykey': 'myvalue'}": (msg) =>
"mykey" in msg.headers &&
String.fromCharCode(...msg.headers["mykey"]) == "myvalue",
"Time is past": (msg) => new Date(msg["time"]) < new Date(),
"Partition is zero": (msg) => msg["partition"] == 0,
"Offset is gte zero": (msg) => msg["offset"] >= 0,
"High watermark is gte zero": (msg) => msg["highWaterMark"] >= 0,
});
}
export function teardown(data) {
if (__VU == 0) {
// Delete the topic
connection.deleteTopic(topic);
}
writer.close();
reader.close();
connection.close();
}
/*
This is a k6 test script that imports the xk6-kafka and
tests Kafka with 100 JSON Schema messages per iteration.
*/
import { check } from "k6";
import {
Writer,
Reader,
Connection,
SchemaRegistry,
KEY,
VALUE,
SCHEMA_TYPE_JSON,
TOPIC_NAME_STRATEGY,
} from "k6/x/kafka"; // import kafka extension
// Kafka bootstrap broker(s) and the topic exercised by this test.
const brokers = ["localhost:9092"];
const topic = "xk6_jsonschema_test";
// Producer for the test topic; the topic is created on first write if missing.
const writer = new Writer({
  brokers: brokers,
  topic: topic,
  autoCreateTopic: true,
});
// Consumer for the same topic.
const reader = new Reader({
  brokers: brokers,
  topic: topic,
});
// Admin connection to the first broker (topic create/delete).
const connection = new Connection({
  address: brokers[0],
});
// Schema registry client used for (de)serialization against a local registry.
const schemaRegistry = new SchemaRegistry({
  url: "http://localhost:8081",
});
// Create the topic only once (guarded by VU id).
if (__VU == 0) {
  connection.createTopic({ topic: topic });
}
// JSON Schema describing message keys.
const keySchema = JSON.stringify({
  title: "Key",
  type: "object",
  properties: {
    key: {
      type: "string",
      description: "A key.",
    },
  },
});
// JSON Schema describing message values.
const valueSchema = JSON.stringify({
  title: "Value",
  type: "object",
  properties: {
    firstName: {
      type: "string",
      description: "First name.",
    },
    lastName: {
      type: "string",
      description: "Last name.",
    },
  },
});
// Registry subject names for key and value, derived via the topic-name strategy.
const keySubjectName = schemaRegistry.getSubjectName({
  topic: topic,
  element: KEY,
  subjectNameStrategy: TOPIC_NAME_STRATEGY,
  schema: keySchema,
});
const valueSubjectName = schemaRegistry.getSubjectName({
  topic: topic,
  element: VALUE,
  subjectNameStrategy: TOPIC_NAME_STRATEGY,
  schema: valueSchema,
});
// Register the schemas under their subjects and keep the schema objects
// for later serialize/deserialize calls.
const keySchemaObject = schemaRegistry.createSchema({
  subject: keySubjectName,
  schema: keySchema,
  schemaType: SCHEMA_TYPE_JSON,
});
const valueSchemaObject = schemaRegistry.createSchema({
  subject: valueSubjectName,
  schema: valueSchema,
  schemaType: SCHEMA_TYPE_JSON,
});
/**
 * Produce 100 JSON-Schema-serialized messages, then consume 20 and verify
 * their key/value contents against the registered schemas.
 */
export default function () {
  for (let index = 0; index < 100; index++) {
    let messages = [
      {
        key: schemaRegistry.serialize({
          data: {
            key: "key-" + index,
          },
          schema: keySchemaObject,
          schemaType: SCHEMA_TYPE_JSON,
        }),
        value: schemaRegistry.serialize({
          data: {
            firstName: "firstName-" + index,
            lastName: "lastName-" + index,
          },
          schema: valueSchemaObject,
          schemaType: SCHEMA_TYPE_JSON,
        }),
      },
    ];
    writer.produce({ messages: messages });
  }
  let messages = reader.consume({ limit: 20 });
  // Single check() call: the count assertion was previously issued in a
  // separate check() AND repeated here, double-counting it in the metrics.
  check(messages, {
    "20 message returned": (msgs) => msgs.length == 20,
    "key starts with 'key-' string": (msgs) =>
      schemaRegistry
        .deserialize({
          data: msgs[0].key,
          schema: keySchemaObject,
          schemaType: SCHEMA_TYPE_JSON,
        })
        .key.startsWith("key-"),
    "value contains 'firstName-' and 'lastName-' strings": (msgs) =>
      schemaRegistry
        .deserialize({
          data: msgs[0].value,
          schema: valueSchemaObject,
          schemaType: SCHEMA_TYPE_JSON,
        })
        .firstName.startsWith("firstName-") &&
      schemaRegistry
        .deserialize({
          data: msgs[0].value,
          schema: valueSchemaObject,
          schemaType: SCHEMA_TYPE_JSON,
        })
        .lastName.startsWith("lastName-"),
  });
}
/**
 * Test teardown: drop the shared topic from a single VU, then release all
 * Kafka client resources in creation order.
 */
export function teardown(data) {
  if (__VU == 0) {
    // Delete the topic
    connection.deleteTopic(topic);
  }
  for (const client of [writer, reader, connection]) {
    client.close();
  }
}
/*
This is a k6 test script that imports the xk6-kafka extension and
tests Kafka by sending 200 JSON messages per iteration. It
also uses SASL authentication.
*/
import { check } from "k6";
import {
Writer,
Reader,
Connection,
SchemaRegistry,
SCHEMA_TYPE_JSON,
SASL_PLAIN,
TLS_1_2,
} from "k6/x/kafka"; // import kafka extension
// k6 run options: a single scenario driving one VU for 10 seconds.
export const options = {
  // This is used for testing purposes. For real-world use, you should use your own options:
  // https://k6.io/docs/using-k6/k6-options/
  scenarios: {
    sasl_auth: {
      executor: "constant-vus",
      vus: 1,
      duration: "10s",
      gracefulStop: "1s",
    },
  },
};
// Kafka broker list and target topic for the SASL/TLS test.
const brokers = ["localhost:9092"];
const topic = "xk6_kafka_json_topic";
// SASL config is optional
const saslConfig = {
  username: "client",
  password: "client-secret",
  // Possible values for the algorithm are:
  // NONE (default)
  // SASL_PLAIN
  // SASL_SCRAM_SHA256
  // SASL_SCRAM_SHA512
  // SASL_SSL (must enable TLS)
  // SASL_AWS_IAM (configurable via env or AWS IAM config files - no username/password needed)
  algorithm: SASL_PLAIN,
};
// TLS config is optional
const tlsConfig = {
  // Enable/disable TLS (default: false)
  enableTls: false,
  // Skip TLS verification if the certificate is invalid or self-signed (default: false)
  insecureSkipTlsVerify: false,
  // Possible values:
  // TLS_1_0
  // TLS_1_1
  // TLS_1_2 (default)
  // TLS_1_3
  minVersion: TLS_1_2,
  // Only needed if you have a custom or self-signed certificate and keys
  // clientCertPem: "/path/to/your/client.pem",
  // clientKeyPem: "/path/to/your/client-key.pem",
  // serverCaPem: "/path/to/your/ca.pem",
};
// Start reading from offset 0 of the chosen partition.
const offset = 0;
// partition and groupId are mutually exclusive
const partition = 0;
const numPartitions = 1;
const replicationFactor = 1;
// Producer with the SASL/TLS settings applied.
const writer = new Writer({
  brokers: brokers,
  topic: topic,
  sasl: saslConfig,
  tls: tlsConfig,
});
// Consumer pinned to one partition and a fixed starting offset.
const reader = new Reader({
  brokers: brokers,
  topic: topic,
  partition: partition,
  offset: offset,
  sasl: saslConfig,
  tls: tlsConfig,
});
// Admin connection with the same authentication settings.
const connection = new Connection({
  address: brokers[0],
  sasl: saslConfig,
  tls: tlsConfig,
});
// Registry client without a URL; used here for plain JSON (de)serialization.
const schemaRegistry = new SchemaRegistry();
// Create the topic and log the existing topics once (guarded by VU id).
if (__VU == 0) {
  connection.createTopic({
    topic: topic,
    numPartitions: numPartitions,
    replicationFactor: replicationFactor,
  });
  console.log(
    "Existing topics: ",
    connection.listTopics(saslConfig, tlsConfig),
  );
}
/**
 * Produce 200 JSON messages (two per loop pass) through the SASL/TLS
 * writer, then consume 10 and validate key and value contents.
 */
export default function () {
  // Serialize one key/value pair for the given correlation id.
  const buildMessage = (correlationId) => ({
    key: schemaRegistry.serialize({
      data: { correlationId: correlationId },
      schemaType: SCHEMA_TYPE_JSON,
    }),
    value: schemaRegistry.serialize({
      data: { name: "xk6-kafka" },
      schemaType: SCHEMA_TYPE_JSON,
    }),
  });
  for (let i = 0; i < 100; i++) {
    writer.produce({
      messages: [
        buildMessage("test-id-abc-" + i),
        buildMessage("test-id-def-" + i),
      ],
    });
  }
  // Read 10 messages only
  const consumed = reader.consume({ limit: 10 });
  check(consumed, {
    "10 messages returned": (msgs) => msgs.length == 10,
    "key is correct": (msgs) =>
      schemaRegistry
        .deserialize({ data: msgs[0].key, schemaType: SCHEMA_TYPE_JSON })
        .correlationId.startsWith("test-id-"),
    "value is correct": (msgs) =>
      schemaRegistry.deserialize({
        data: msgs[0].value,
        schemaType: SCHEMA_TYPE_JSON,
      }).name == "xk6-kafka",
  });
}
/**
 * Clean up after the run: the topic is deleted from a single VU, then the
 * producer, consumer and admin connection are closed.
 */
export function teardown(data) {
  // Gate topic deletion so it happens exactly once.
  if (__VU == 0) {
    connection.deleteTopic(topic);
  }
  [writer, reader, connection].forEach((client) => client.close());
}
/*
This is a k6 test script that imports the xk6-kafka extension and
tests Kafka by sending 200 string-serialized messages per iteration.
*/
import { check } from "k6";
// import * as kafka from "k6/x/kafka";
import {
Writer,
Reader,
Connection,
SchemaRegistry,
SCHEMA_TYPE_STRING,
} from "k6/x/kafka"; // import kafka extension
// Prints module-level constants
// console.log(kafka);
// Kafka broker list and target topic for this test.
const brokers = ["localhost:9092"];
const topic = "xk6_kafka_json_topic";
// Producer; the topic is created automatically on first write if missing.
const writer = new Writer({
  brokers: brokers,
  topic: topic,
  autoCreateTopic: true,
});
// Consumer for the same topic.
const reader = new Reader({
  brokers: brokers,
  topic: topic,
});
// Admin connection used for topic management.
const connection = new Connection({
  address: brokers[0],
});
// Registry client without a URL; used here for string (de)serialization only.
const schemaRegistry = new SchemaRegistry();
// Create the topic only once (guarded by VU id).
if (__VU == 0) {
  connection.createTopic({ topic: topic });
}
export const options = {
  thresholds: {
    // Base thresholds to see if the writer or reader is working
    kafka_writer_error_count: ["count == 0"],
    kafka_reader_error_count: ["count == 0"],
  },
};
/**
 * Produce 200 string-serialized messages (two per loop pass), then consume
 * 10 and verify topic, key, value, headers and metadata fields.
 */
export default function () {
  // Serialize a plain string with the string serde.
  const asString = (data) =>
    schemaRegistry.serialize({ data: data, schemaType: SCHEMA_TYPE_STRING });
  for (let i = 0; i < 100; i++) {
    const batch = [
      {
        key: asString("test-key-string"),
        value: asString("test-value-string"),
        headers: {
          mykey: "myvalue",
        },
        offset: i,
        partition: 0,
        time: new Date(), // Will be converted to timestamp automatically
      },
      {
        key: asString("test-key-string"),
        value: asString("test-value-string"),
        headers: {
          mykey: "myvalue",
        },
      },
    ];
    writer.produce({ messages: batch });
  }
  // Read 10 messages only
  const received = reader.consume({ limit: 10 });
  check(received, {
    "10 messages are received": (messages) => messages.length == 10,
  });
  // Deserialize a message field back into a string.
  const fromString = (data) =>
    schemaRegistry.deserialize({ data: data, schemaType: SCHEMA_TYPE_STRING });
  check(received[0], {
    "Topic equals to xk6_kafka_json_topic": (msg) => msg["topic"] == topic,
    "Key is a string and is correct": (msg) =>
      fromString(msg.key) == "test-key-string",
    "Value is a string and is correct": (msg) =>
      typeof fromString(msg.value) == "string" &&
      fromString(msg.value) == "test-value-string",
    "Header equals {'mykey': 'myvalue'}": (msg) =>
      "mykey" in msg.headers &&
      String.fromCharCode(...msg.headers["mykey"]) == "myvalue",
    "Time is past": (msg) => new Date(msg["time"]) < new Date(),
    "Partition is zero": (msg) => msg["partition"] == 0,
    "Offset is gte zero": (msg) => msg["offset"] >= 0,
    "High watermark is gte zero": (msg) => msg["highWaterMark"] >= 0,
  });
}
/**
 * Remove the test topic (first VU only), then close every Kafka client in
 * creation order.
 */
export function teardown(data) {
  const isFirstVu = __VU == 0;
  if (isFirstVu) {
    // Delete the topic
    connection.deleteTopic(topic);
  }
  for (const client of [writer, reader, connection]) {
    client.close();
  }
}
/*
This is a k6 test script that imports the xk6-kafka extension and
tests consuming messages with a maxWait timeout set on the reader.
*/
import { check } from "k6";
// import * as kafka from "k6/x/kafka";
import { Reader, Connection } from "k6/x/kafka"; // import kafka extension
// Prints module-level constants
// console.log(kafka);
// Broker list and topic for the consume-only test.
const brokers = ["localhost:9092"];
const topic = "xk6_kafka_json_topic";
// Consumer with a bounded wait: maxWait limits how long consume() blocks
// waiting for messages (per xk6-kafka reader configuration).
const reader = new Reader({
  brokers: brokers,
  topic: topic,
  maxWait: "5s",
});
// Admin connection for topic management.
const connection = new Connection({
  address: brokers[0],
});
// Create the topic only once (guarded by VU id).
if (__VU === 0) {
  connection.createTopic({ topic: topic });
}
export const options = {
  thresholds: {
    // Base thresholds to see if the writer or reader is working
    kafka_writer_error_count: ["count == 0"],
    kafka_reader_error_count: ["count == 0"],
  },
  duration: "11s",
};
/**
 * Attempt to consume 10 messages; with maxWait configured, execution
 * continues past consume() even when the topic yields no messages.
 */
export default function () {
  // Read 10 messages only
  const received = reader.consume({ limit: 10 });
  console.log("continuing execution");
  check(received, {
    "10 messages are received": (messages) => messages.length === 10,
  });
}
/**
 * Delete the test topic from a single VU, then close the reader and the
 * admin connection.
 */
export function teardown(data) {
  // Only the first VU removes the shared topic.
  if (__VU === 0) {
    connection.deleteTopic(topic);
  }
  [reader, connection].forEach((client) => client.close());
}
/*
* This script shows how to load certificates and keys from
* a JKS keystore, so that they can be used in a configuring TLS.
* This is just a showcase, and not a complete script.
* The keystore MUST be created with JKS storetype.
*
* ⚠️ The PKCS#12 format is not supported.
*/
import { LoadJKS, TLS_1_2 } from "k6/x/kafka";
// If server and client keystore are separate, then you must
// call LoadJKS twice, once for each keystore.
// This will load the certificates and keys from the keystore
// and write them to the disk, so that they can be used in
// the TLS configuration.
// Extract the client certificate chain, client key and server CA from the
// JKS keystore; the extension writes them to disk as PEM files so the TLS
// config below can reference them.
const jks = LoadJKS({
  path: "fixtures/kafka-keystore.jks",
  password: "password",
  clientCertAlias: "localhost",
  clientKeyAlias: "localhost",
  clientKeyPassword: "password",
  serverCaAlias: "caroot",
});
// TLS configuration built from the loaded keystore material.
const tlsConfig = {
  enableTls: true,
  insecureSkipTlsVerify: false,
  minVersion: TLS_1_2,
  // The certificates and keys can be loaded from a JKS keystore:
  // clientCertsPem is an array of PEM-encoded certificates, and the filenames
  // will be named "client-cert-0.pem", "client-cert-1.pem", etc.
  // clientKeyPem is the PEM-encoded private key and the filename will be
  // named "client-key.pem".
  // serverCaPem is the PEM-encoded CA certificate and the filename will be
  // named "server-ca.pem".
  clientCertPem: jks["clientCertsPem"][0], // The first certificate in the chain
  clientKeyPem: jks["clientKeyPem"],
  serverCaPem: jks["serverCaPem"],
};
/**
 * Showcase only: log the keystore keys, its contents and the derived
 * TLS configuration.
 */
export default function () {
  for (const value of [Object.keys(jks), jks, tlsConfig]) {
    console.log(value);
  }
}
/*
This is a k6 test script that imports the xk6-kafka and
list topics on all Kafka partitions and creates a topic.
*/
import { Connection } from "k6/x/kafka"; // import kafka extension
// Broker address and the topic this script creates.
const address = "localhost:9092";
const topic = "xk6_kafka_test_topic";
// Admin connection for listing/creating/deleting topics.
const connection = new Connection({
  address: address,
});
// Snapshot of the topics that existed when the script initialized.
const results = connection.listTopics();
// NOTE(review): unlike the other examples, this createTopic call is not
// guarded by a VU id check, so it runs in every VU's init — confirm intended.
connection.createTopic({ topic: topic });
/**
 * Log every topic name that existed when the script was initialized.
 */
export default function () {
  for (const topicName of results) {
    console.log(topicName);
  }
}
/**
 * Delete the test topic (first VU only) and close the admin connection.
 */
export function teardown(data) {
  const isFirstVu = __VU == 0;
  if (isFirstVu) {
    // Delete the topic
    connection.deleteTopic(topic);
  }
  connection.close();
}
This diff is collapsed.
# How to run a k6 test against a Redis test server with TLS
1. Move into the docker folder: `cd docker`
2. Run `sh gen-test-certs.sh` to generate custom TLS certificates that the docker container will use.
3. Run `docker-compose up` to start the Redis server with TLS enabled.
4. Connect to it with `redis-cli --tls --cert ./tests/tls/redis.crt --key ./tests/tls/redis.key --cacert ./tests/tls/ca.crt` and run `AUTH tjkbZ8jrwz3pGiku` to authenticate, and verify that the redis server is properly set up.
5. Build the k6 binary with `xk6 build --with github.com/k6io/xk6-redis=.`
6. Run `./k6 run loadtest-tls.js` to run the k6 load test with TLS enabled.
\ No newline at end of file
# Redis 7 test server with TLS enabled, used to run the k6 load test
# against a TLS endpoint. Certificates come from gen-test-certs.sh.
version: "3.3"
services:
  redis:
    image: docker.io/bitnami/redis:7.0.8
    user: root
    restart: always
    environment:
      # Password auth is required; FLUSHDB/FLUSHALL are disabled for safety.
      - ALLOW_EMPTY_PASSWORD=false
      - REDIS_PASSWORD=tjkbZ8jrwz3pGiku
      - REDIS_DISABLE_COMMANDS=FLUSHDB,FLUSHALL
      # Verbose logging; client certificates are accepted but not required.
      - REDIS_EXTRA_FLAGS=--loglevel verbose --tls-auth-clients optional
      - REDIS_TLS_ENABLED=yes
      - REDIS_TLS_PORT=6379
      - REDIS_TLS_CERT_FILE=/tls/redis.crt
      - REDIS_TLS_KEY_FILE=/tls/redis.key
      - REDIS_TLS_CA_FILE=/tls/ca.crt
    ports:
      - "6379:6379"
    volumes:
      - redis_data:/bitnami/redis/data
      # Generated certificates from ./tests/tls are mounted at /tls.
      - ./tests/tls:/tls
volumes:
  redis_data:
    driver: local
#!/bin/bash
# Generate some test certificates which are used by the regression test suite:
#
# tests/tls/ca.{crt,key} Self signed CA certificate.
# tests/tls/redis.{crt,key} A certificate with no key usage/policy restrictions.
# tests/tls/client.{crt,key} A certificate restricted for SSL client usage.
# tests/tls/server.{crt,key} A certificate restricted for SSL server usage.
# tests/tls/redis.dh DH Params file.
# Generate a certificate signed by the test CA.
#   $1 - base name: output goes to tests/tls/<name>.{key,crt}
#   $2 - certificate common name (CN)
#   $3 - extra openssl x509 options (intentionally word-split below)
generate_cert() {
    local name=$1
    local cn="$2"
    local opts="$3"
    local keyfile=tests/tls/${name}.key
    local certfile=tests/tls/${name}.crt
    # Reuse an existing key so repeated runs keep stable keys.
    # Quote path expansions so names with spaces/globs don't break (SC2086);
    # $opts stays unquoted on purpose so multiple options split into words.
    [ -f "$keyfile" ] || openssl genrsa -out "$keyfile" 2048
    openssl req \
        -new -sha256 \
        -subj "/O=Redis Test/CN=$cn" \
        -key "$keyfile" |
        openssl x509 \
            -req -sha256 \
            -CA tests/tls/ca.crt \
            -CAkey tests/tls/ca.key \
            -CAserial tests/tls/ca.txt \
            -CAcreateserial \
            -days 365 \
            $opts \
            -out "$certfile"
}
# Working directory for all generated test material.
mkdir -p tests/tls
# Self-signed CA; the key is reused across runs if already present.
[ -f tests/tls/ca.key ] || openssl genrsa -out tests/tls/ca.key 4096
openssl req \
-x509 -new -nodes -sha256 \
-key tests/tls/ca.key \
-days 3650 \
-subj '/O=Redis Test/CN=Certificate Authority' \
-out tests/tls/ca.crt
# Extension profiles restricting certificates to server-only / client-only use.
cat >tests/tls/openssl.cnf <<_END_
[ server_cert ]
keyUsage = digitalSignature, keyEncipherment
nsCertType = server
[ client_cert ]
keyUsage = digitalSignature, keyEncipherment
nsCertType = client
_END_
# Server-restricted, client-restricted, and unrestricted certificates.
generate_cert server "Server-only" "-extfile tests/tls/openssl.cnf -extensions server_cert"
generate_cert client "Client-only" "-extfile tests/tls/openssl.cnf -extensions client_cert"
generate_cert redis "Generic-cert"
# DH parameters file; reused across runs if already present.
[ -f tests/tls/redis.dh ] || openssl dhparam -out tests/tls/redis.dh 2048
This diff is collapsed.
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment