Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

zustand state implemented #20

Merged
merged 3 commits into from
Nov 30, 2023
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion jest.config.ts
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
export default {
preset: 'ts-jest', // Presetting to typescript jest
testEnvironment: 'jsdom', // Testing environment is jsdom

setupFiles: ['./jest.setup.ts'],
transform: {
'^.+\\.tsx?$': [
// If the file is a typescript or typescript jsx file transform it using ts-jest
Expand Down
7 changes: 7 additions & 0 deletions jest.setup.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,7 @@
global.MediaStream = jest.fn().mockImplementation(() => ({
getAudioTracks: jest.fn().mockReturnValue([
{
stop: jest.fn(),
},
]),
}));
196 changes: 6 additions & 190 deletions src/__tests__/AudioRecorder.test.tsx
Original file line number Diff line number Diff line change
@@ -1,197 +1,13 @@
/* eslint-disable @typescript-eslint/no-explicit-any */
import { render, screen, fireEvent, waitFor } from '@testing-library/react';
import AudioRecorder from '../components/AudioRecorder';
import { render, screen } from '@testing-library/react';
import '@testing-library/jest-dom';
import AudioRecorder from '../components/AudioRecorder';

// Mock MediaStream
global.MediaStream = jest.fn().mockImplementation(() => ({
getAudioTracks: jest.fn().mockReturnValue([
{
stop: jest.fn(),
},
]),
}));

Object.defineProperty(global.navigator, 'mediaDevices', {
value: {
getUserMedia: jest
.fn()
.mockImplementation(() => Promise.resolve(new MediaStream())),
},
writable: true,
configurable: true,
});

describe('AudioRecorder component', () => {
// Mock MediaRecorder
class MockMediaRecorder {
static NOT_STARTED = 'inactive';

static RECORDING = 'recording';

static PAUSED = 'paused';

static STOPPED = 'inactive';

state = MockMediaRecorder.NOT_STARTED;

stream = null;

mimeType = '';

audioBitsPerSecond = 0;

videoBitsPerSecond = 0;

ondataavailable = null;

onerror = null;

onpause = null;

onresume = null;

onstart = null;

onstop = null;

static isTypeSupported(): boolean {
return true;
}

constructor(stream: null, options: { mimeType: string }) {
this.stream = stream;
this.mimeType = options.mimeType;
}

start() {
this.state = MockMediaRecorder.RECORDING;
}

stop() {
this.state = MockMediaRecorder.STOPPED;
}

pause() {
this.state = MockMediaRecorder.PAUSED;
}

resume() {
this.state = MockMediaRecorder.RECORDING;
}
}

// Mock getUserMedia
const mockGetUserMedia = jest.fn().mockImplementation(() => {
return Promise.resolve(new MediaStream());
});

beforeAll(() => {
window.MediaRecorder = MockMediaRecorder as any;
navigator.mediaDevices.getUserMedia = mockGetUserMedia;
});

test('renders AudioRecorder component', () => {
render(<AudioRecorder />);
expect(screen.getByText(/Audio Recorder/i)).toBeInTheDocument();
});

test('The button text changes as expected when interacted with', async () => {
const { getByText } = render(<AudioRecorder />);
const getMicrophoneButton = getByText(/Get Microphone/i);
// Mock getUserMedia to simulate user granting permission
mockGetUserMedia.mockImplementation(() =>
Promise.resolve(new MediaStream()),
);
// Trigger permission
fireEvent.click(getMicrophoneButton);
await waitFor(() => expect(mockGetUserMedia).toHaveBeenCalled());

// Check Start Recording
const startRecordingButton = getByText(/Start Recording/i);
expect(startRecordingButton).toBeInTheDocument();

// Trigger recording
fireEvent.click(startRecordingButton);

// Check Stop Recording
const stopRecordingButton = getByText(/Stop Recording/i);
expect(stopRecordingButton).toBeInTheDocument();

// Trigger stop
fireEvent.click(stopRecordingButton);
// Wait for the "Start Recording" button to appear in the document
const startRecordingButtonAgain = await waitFor(() =>
getByText(/Start Recording/i),
);

// Check Start Recording, again
expect(startRecordingButtonAgain).toBeInTheDocument();
});

test('requests microphone permission when "Get Microphone" button is clicked', async () => {
// Arrange: Render the component and mock navigator.mediaDevices.getUserMedia
global.navigator.mediaDevices.getUserMedia = mockGetUserMedia;
render(<AudioRecorder />);
// Act: Click the "Get Microphone" button
const getMicrophoneButton = screen.getByText(/Get Microphone/i);
fireEvent.click(getMicrophoneButton);
// Assert: Check if navigator.mediaDevices.getUserMedia is called
await waitFor(() => expect(mockGetUserMedia));
});

test('starts recording when "Start Recording" button is clicked', async () => {
// Arrange: Render the component and grant microphone permission
render(<AudioRecorder />);
const getMicrophoneButton = screen.getByText(/Get Microphone/i);
fireEvent.click(getMicrophoneButton);
await waitFor(() => expect(mockGetUserMedia));

const recorderInstance = new MockMediaRecorder(null, {
mimeType: 'audio/webm',
});

// Act: Click the "Start Recording" button
const startRecordingButton = screen.getByText(/Start Recording/i);
fireEvent.click(startRecordingButton);
recorderInstance.start();

// Assert: Check if the recording status is 'recording' and if MediaRecorder.start is called
expect(recorderInstance.state).toBe(MockMediaRecorder.RECORDING);
});

test('stops recording when "Stop Recording" button is clicked', async () => {
// Arrange: Render the component, grant microphone permission, and start recording
describe('<AudioRecorder />', () => {
test('it should mount', () => {
render(<AudioRecorder />);
const getMicrophoneButton = screen.getByText(/Get Microphone/i);
fireEvent.click(getMicrophoneButton);
await waitFor(() => expect(mockGetUserMedia));

const recorderInstance = new MockMediaRecorder(null, {
mimeType: 'audio/webm',
});
const startRecordingButton = screen.getByText(/Start Recording/i);
fireEvent.click(startRecordingButton);
recorderInstance.start();

// Act: Click the "Stop Recording" button
const stopRecordingButton = screen.getByText(/Stop Recording/i);
fireEvent.click(stopRecordingButton);
recorderInstance.stop();

// Assert: Check if the recording status is 'inactive' and if MediaRecorder.stop is called
expect(recorderInstance.state).toBe(MockMediaRecorder.STOPPED);
});

test('removes audio clip when "Remove" button is clicked', async () => {
// Arrange: Render the component, grant microphone permission, start recording, stop recording, and add an audio clip
// Act: Click the "Remove" button
// Assert: Check if the audio clip is removed from the state
});
const audioRecorder = screen.getByTestId('AudioRecorder');

test('uploads audio clip when "Upload" button is clicked', async () => {
// Arrange: Render the component, grant microphone permission, start recording, stop recording, add an audio clip, and mock the API calls
// Act: Click the "Upload" button
// Assert: Check if the correct API calls are made
expect(audioRecorder).toBeInTheDocument();
});
});
36 changes: 36 additions & 0 deletions src/audioStore.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,36 @@
import { create } from 'zustand';
import { Transcript } from './types/transcript';

type State = {
permission: boolean;
recordingStatus: string;
stream: MediaStream;
audio: { url: string; blob: Blob }[];
audioChunks: Blob[];
isLoading: boolean;
transcript: Transcript;
setPermission: (value: boolean) => void;
setRecordingStatus: (value: string) => void;
setStream: (value: MediaStream) => void;
setAudio: (value: { url: string; blob: Blob }[]) => void;
setAudioChunks: (value: Blob[]) => void;
setIsLoading: (value: boolean) => void;
setTranscript: (value: Transcript) => void;
};

export default create<State>(set => ({
permission: false,
recordingStatus: 'inactive',
stream: new MediaStream(),
audio: [],
audioChunks: [],
isLoading: false,
transcript: { id: '' },
setPermission: value => set({ permission: value }),
setRecordingStatus: value => set({ recordingStatus: value }),
setStream: value => set({ stream: value }),
setAudio: value => set({ audio: value }),
setAudioChunks: value => set({ audioChunks: value }),
setIsLoading: value => set({ isLoading: value }),
setTranscript: value => set({ transcript: value }),
}));
46 changes: 32 additions & 14 deletions src/components/AudioRecorder.tsx
Original file line number Diff line number Diff line change
@@ -1,27 +1,44 @@
/* eslint-disable no-console */
/* eslint-disable no-alert */
/* eslint-disable react/no-array-index-key */
import { useState, useRef, useEffect } from 'react';
import { useRef, useEffect } from 'react';
import Status from './Status';
import Result from './Result';
import { Transcript } from '../types/transcript';
import assemblyAPI from '../core/assemblyAPI';
import useAudioStore from '../audioStore';

const mimeType = process.env.VITE_MIMETYPE;

const AudioRecorder = () => {
const [permission, setPermission] = useState(false);
const {
permission,
setPermission,
recordingStatus,
setRecordingStatus,
stream,
setStream,
audio,
setAudio,
audioChunks,
setAudioChunks,
isLoading,
setIsLoading,
transcript,
setTranscript,
} = useAudioStore();

// const [permission, setPermission] = useState(false);
const mediaRecorder = useRef<MediaRecorder | undefined>();
const [recordingStatus, setRecordingStatus] = useState('inactive');
const [stream, setStream] = useState<MediaStream>(new MediaStream());
// const [recordingStatus, setRecordingStatus] = useState('inactive');
// const [stream, setStream] = useState<MediaStream>(new MediaStream());

/** @description { url: string; blob: Blob[] }[] */
const [audio, setAudio] = useState<{ url: string; blob: Blob }[]>([]);
const [audioChunks, setAudioChunks] = useState<Blob[]>([]);
const [isLoading, setIsLoading] = useState(false);
// /** @description { url: string; blob: Blob[] }[] */
// const [audio, setAudio] = useState<{ url: string; blob: Blob }[]>([]);
// const [audioChunks, setAudioChunks] = useState<Blob[]>([]);
// const [isLoading, setIsLoading] = useState(false);

/** @details @problem must infer from assemblyAi itself ... ?! */
const [transcript, setTranscript] = useState<Transcript>({ id: '' });
// /** @details @problem must infer from assemblyAi itself ... ?! */
// const [transcript, setTranscript] = useState<Transcript>({ id: '' });
const getMicrophonePermission = async () => {
if ('MediaRecorder' in window) {
try {
Expand Down Expand Up @@ -64,7 +81,8 @@ const AudioRecorder = () => {
mediaRecorder.current!.onstop = () => {
const audioBlob = new Blob(audioChunks, { type: mimeType });
const audioUrl = URL.createObjectURL(audioBlob);
setAudio(aud => [...aud, { url: audioUrl, blob: audioBlob }]);
const newAudio = [...audio, { url: audioUrl, blob: audioBlob }];
setAudio(newAudio);
setAudioChunks([]);
};
};
Expand All @@ -90,7 +108,7 @@ const AudioRecorder = () => {
}, 1000);

return () => clearInterval(interval);
}, [isLoading, transcript]);
}, [isLoading, setIsLoading, setTranscript, transcript]);

const handleRemoveAudio = (index: number) => {
const updatedAudio = audio.splice(0);
Expand All @@ -116,7 +134,7 @@ const AudioRecorder = () => {
};

return (
<div>
<div data-testid="AudioRecorder">
<div>
{transcript.text && transcript.status === 'completed' ? (
<Result transcript={transcript} />
Expand Down
2 changes: 1 addition & 1 deletion tsconfig.json
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@
"strict": true,
"forceConsistentCasingInFileNames": true,
"module": "ESNext",
"types": ["node"],
"types": ["jest", "node"],
"moduleResolution": "Node",
"resolveJsonModule": true,
"isolatedModules": true,
Expand Down
2 changes: 1 addition & 1 deletion tsconfig.tsbuildinfo

Large diffs are not rendered by default.

Loading