2023-01-03 16:55:26 +00:00
/*
Copyright 2018-2024 New Vector Ltd.
Copyright 2017 OpenMarket Ltd

SPDX-License-Identifier: AGPL-3.0-only OR LicenseRef-Element-Commercial
Please see LICENSE in the repository root for full details.
*/
// This module contains all the code needed to log the console, persist it to
// disk and submit bug reports. Rationale is as follows:
// - Monkey-patching the console is preferable to having a log library because
// we can catch logs by other libraries more easily, without having to all
// depend on the same log framework / pass the logger around.
// - We use IndexedDB to persists logs because it has generous disk space
// limits compared to local storage. IndexedDB does not work in incognito
// mode, in which case this module will not be able to write logs to disk.
// However, the logs will still be stored in-memory, so can still be
// submitted in a bug report should the user wish to: we can also store more
// logs in-memory than in local storage, which does work in incognito mode.
// We also need to handle the case where there are 2+ tabs. Each JS runtime
// generates a random string which serves as the "ID" for that tab/session.
// These IDs are stored along with the log lines.
// - Bug reports are sent as a POST over HTTPS: it purposefully does not use
// Matrix as bug reports may be made when Matrix is not responsive (which may
// be the cause of the bug). We send the most recent N MB of UTF-8 log data,
// starting with the most recent, which we know because the "ID"s are
// actually timestamps. We then purge the remaining logs. We also do this
// purge on startup to prevent logs from accumulating.
2023-01-16 17:27:49 +00:00
import EventEmitter from "events" ;
2024-11-14 12:18:43 +01:00
import { throttle } from "lodash-es" ;
2024-12-11 09:27:55 +00:00
import { type Logger , logger } from "matrix-js-sdk/src/logger" ;
2025-02-03 13:28:12 +01:00
import { secureRandomString } from "matrix-js-sdk/src/randomstring" ;
2024-12-11 09:27:55 +00:00
import { type LoggingMethod } from "loglevel" ;
import type loglevel from "loglevel" ;
2022-04-07 14:22:36 -07:00
// the length of log data we keep in indexeddb (and include in the reports):
// chunks beyond this size are purged by consume().
const MAX_LOG_SIZE = 1024 * 1024 * 5; // 5 MB

// Shortest amount of time between flushes. We are just appending to an
// IndexedDB table so we don't expect flushing to be that expensive, but
// we can batch the writes a little.
const MAX_FLUSH_INTERVAL_MS = 2 * 1000;

// only descend this far into nested object trees when serialising log args
const DEPTH_LIMIT = 3;

// Events emitted by ConsoleLogger.
enum ConsoleLoggerEvent {
    // Fired each time a line is appended to the in-memory log buffer.
    Log = "log",
}
2022-04-07 14:22:36 -07:00
// A class which monkey-patches the global console and stores log lines.
2022-06-06 22:42:48 +02:00
// One chunk of log lines as stored in (or read back from) IndexedDB.
interface LogEntry {
    // Session/instance ID ("instance-<random>") identifying which tab/session
    // produced these lines; also used as "-" for in-memory-only reports.
    id: string;
    // Many newline-delimited log lines concatenated into one string.
    lines: string;
    // Sequence number of this chunk within its session; optional because
    // entries handed to report consumers may omit it.
    index?: number;
}
2023-06-05 18:53:05 +02:00
class ConsoleLogger extends EventEmitter {
2022-06-11 14:28:30 +02:00
private logs = "" ;
2022-06-08 16:36:22 +02:00
2023-09-25 18:04:34 +01:00
public log = (
level : LogLevel ,
2023-09-26 12:08:08 +01:00
. . . args : ( Error | DOMException | object | string | undefined ) [ ]
2023-09-25 18:04:34 +01:00
) : void = > {
2022-04-07 14:22:36 -07:00
// We don't know what locale the user may be running so use ISO strings
const ts = new Date ( ) . toISOString ( ) ;
// Convert objects and errors to helpful things
args = args . map ( ( arg ) = > {
if ( arg instanceof DOMException ) {
return arg . message + ` ( ${ arg . name } | ${ arg . code } ) ` ;
} else if ( arg instanceof Error ) {
return arg . message + ( arg . stack ? ` \ n ${ arg . stack } ` : "" ) ;
} else if ( typeof arg === "object" ) {
2022-06-11 14:28:30 +02:00
return JSON . stringify ( arg , getCircularReplacer ( ) ) ;
2022-04-07 14:22:36 -07:00
} else {
return arg ;
}
} ) ;
// Some browsers support string formatting which we're not doing here
// so the lines are a little more ugly but easy to implement / quick to
// run.
// Example line:
// 2017-01-18T11:23:53.214Z W Failed to set badge count
2024-12-10 13:05:09 -05:00
// eslint-disable-next-line @typescript-eslint/no-base-to-string
2022-04-07 14:22:36 -07:00
let line = ` ${ ts } ${ level } ${ args . join ( " " ) } \ n ` ;
// Do some cleanup
line = line . replace ( /token=[a-zA-Z0-9-]+/gm , "token=xxxxx" ) ;
// Using + really is the quickest way in JS
// http://jsperf.com/concat-vs-plus-vs-join
this . logs += line ;
2023-01-16 17:27:49 +00:00
this . emit ( ConsoleLoggerEvent . Log ) ;
2023-09-25 18:04:34 +01:00
} ;
2022-04-07 14:22:36 -07:00
/ * *
2023-01-16 16:43:45 +00:00
* Returns the log lines to flush to disk and empties the internal log buffer
* @return { string } \ n delimited log lines
2022-04-07 14:22:36 -07:00
* /
2023-01-16 16:43:45 +00:00
public popLogs ( ) : string {
2022-04-07 14:22:36 -07:00
const logsToFlush = this . logs ;
this . logs = "" ;
return logsToFlush ;
}
2023-01-16 16:43:45 +00:00
/ * *
* Returns lines currently in the log buffer without removing them
* @return { string } \ n delimited log lines
* /
public peekLogs ( ) : string {
return this . logs ;
}
2022-04-07 14:22:36 -07:00
}
2022-02-01 15:11:06 -08:00
2022-04-07 14:22:36 -07:00
// A class which stores log lines in an IndexedDB instance.
2023-06-05 18:53:05 +02:00
class IndexedDBLogStore {
2022-06-11 14:28:30 +02:00
private index = 0 ;
2023-06-30 16:43:28 +01:00
private db? : IDBDatabase ;
private flushPromise? : Promise < void > ;
private flushAgainPromise? : Promise < void > ;
2022-06-11 14:28:30 +02:00
private id : string ;
2023-09-22 18:05:13 -04:00
public constructor (
2023-01-16 17:27:49 +00:00
private indexedDB : IDBFactory ,
2023-10-11 10:42:04 -04:00
private loggerInstance : ConsoleLogger ,
2023-01-16 17:27:49 +00:00
) {
2025-02-03 13:28:12 +01:00
this . id = "instance-" + secureRandomString ( 16 ) ;
2023-01-16 17:27:49 +00:00
loggerInstance . on ( ConsoleLoggerEvent . Log , this . onLoggerLog ) ;
2024-09-10 09:49:35 +02:00
window . addEventListener ( "beforeunload" , ( ) = > {
this . flush ( ) . catch ( ( e ) = >
logger . error ( "Failed to flush logs before unload" , e ) ,
) ;
} ) ;
2022-04-07 14:22:36 -07:00
}
/ * *
* @return { Promise } Resolves when the store is ready .
* /
2024-09-10 09:49:35 +02:00
public async connect ( ) : Promise < void > {
2022-04-07 14:22:36 -07:00
const req = this . indexedDB . open ( "logs" ) ;
2022-06-11 14:28:30 +02:00
return new Promise ( ( resolve , reject ) = > {
2023-09-22 18:05:13 -04:00
req . onsuccess = ( ) : void = > {
2023-01-16 16:43:45 +00:00
this . db = req . result ;
2023-01-16 17:27:49 +00:00
2022-04-07 14:22:36 -07:00
resolve ( ) ;
} ;
2023-09-22 18:05:13 -04:00
req . onerror = ( ) : void = > {
2023-06-30 16:43:28 +01:00
const err = "Failed to open log database: " + req ? . error ? . name ;
2022-06-11 14:28:30 +02:00
logger . error ( err ) ;
2022-04-07 14:22:36 -07:00
reject ( new Error ( err ) ) ;
} ;
// First time: Setup the object store
2023-09-22 18:05:13 -04:00
req . onupgradeneeded = ( ) : void = > {
2023-01-16 16:43:45 +00:00
const db = req . result ;
// This is the log entries themselves. Each entry is a chunk of
// logs (ie multiple lines). 'id' is the instance ID (so logs with
// the same instance ID are all from the same session) and 'index'
// is a sequence number for the chunk. The log lines live in the
// 'lines' key, which is a chunk of many newline-separated log lines.
2022-04-07 14:22:36 -07:00
const logObjStore = db . createObjectStore ( "logs" , {
keyPath : [ "id" , "index" ] ,
} ) ;
// Keys in the database look like: [ "instance-148938490", 0 ]
2023-01-16 16:43:45 +00:00
// (The instance ID plus the ID of each log chunk).
2022-04-07 14:22:36 -07:00
// Later on we need to query everything based on an instance id.
// In order to do this, we need to set up indexes "id".
logObjStore . createIndex ( "id" , "id" , { unique : false } ) ;
logObjStore . add (
2023-10-11 10:42:04 -04:00
this . generateLogEntry ( new Date ( ) + " ::: Log database was created." ) ,
2022-04-07 14:22:36 -07:00
) ;
2022-02-01 15:11:06 -08:00
2023-01-16 16:43:45 +00:00
// This records the last time each instance ID generated a log message, such
// that the logs from each session can be collated in the order they last logged
// something.
2022-04-07 14:22:36 -07:00
const lastModifiedStore = db . createObjectStore ( "logslastmod" , {
keyPath : "id" ,
} ) ;
lastModifiedStore . add ( this . generateLastModifiedTime ( ) ) ;
} ;
} ) ;
}
2023-09-22 18:05:13 -04:00
private onLoggerLog = ( ) : void = > {
2023-01-16 17:27:49 +00:00
if ( ! this . db ) return ;
this . throttledFlush ( ) ;
} ;
// Throttled function to flush logs. We use throttle rather
// than debounce as we want logs to be written regularly, otherwise
// if there's a constant stream of logging, we'd never write anything.
2024-09-10 09:49:35 +02:00
private throttledFlush = throttle ( ( ) = > this . flush , MAX_FLUSH_INTERVAL_MS , {
leading : false ,
trailing : true ,
} ) ;
2023-01-16 17:27:49 +00:00
2022-04-07 14:22:36 -07:00
/ * *
* Flush logs to disk .
*
* There are guards to protect against race conditions in order to ensure
* that all previous flushes have completed before the most recent flush .
* Consider without guards :
* - A calls flush ( ) periodically .
* - B calls flush ( ) and wants to send logs immediately afterwards .
* - If B doesn 't wait for A' s flush to complete , B will be missing the
* contents of A ' s flush .
* To protect against this , we set 'flushPromise' when a flush is ongoing .
* Subsequent calls to flush ( ) during this period will chain another flush ,
* then keep returning that same chained flush .
*
* This guarantees that we will always eventually do a flush when flush ( ) is
* called .
*
* @return { Promise } Resolved when the logs have been flushed .
* /
2024-09-10 09:49:35 +02:00
public flush = async ( ) : Promise < void > = > {
2022-04-07 14:22:36 -07:00
// check if a flush() operation is ongoing
if ( this . flushPromise ) {
if ( this . flushAgainPromise ) {
// this is the 3rd+ time we've called flush() : return the same promise.
return this . flushAgainPromise ;
}
// queue up a flush to occur immediately after the pending one completes.
2024-09-10 09:49:35 +02:00
this . flushAgainPromise = this . flushPromise . then ( this . flush ) . then ( ( ) = > {
this . flushAgainPromise = undefined ;
} ) ;
2022-04-07 14:22:36 -07:00
return this . flushAgainPromise ;
}
// there is no flush promise or there was but it has finished, so do
// a brand new one, destroying the chain which may have been built up.
2022-06-06 22:42:48 +02:00
this . flushPromise = new Promise < void > ( ( resolve , reject ) = > {
2022-04-07 14:22:36 -07:00
if ( ! this . db ) {
// not connected yet or user rejected access for us to r/w to the db.
reject ( new Error ( "No connected database" ) ) ;
return ;
}
2023-01-16 17:27:49 +00:00
const lines = this . loggerInstance . popLogs ( ) ;
2022-04-07 14:22:36 -07:00
if ( lines . length === 0 ) {
resolve ( ) ;
return ;
}
const txn = this . db . transaction ( [ "logs" , "logslastmod" ] , "readwrite" ) ;
const objStore = txn . objectStore ( "logs" ) ;
2023-09-22 18:05:13 -04:00
txn . oncomplete = ( ) : void = > {
2022-04-07 14:22:36 -07:00
resolve ( ) ;
} ;
2023-09-22 18:05:13 -04:00
txn . onerror = ( event ) : void = > {
2022-06-11 14:28:30 +02:00
logger . error ( "Failed to flush logs : " , event ) ;
2023-06-30 16:43:28 +01:00
reject ( new Error ( "Failed to write logs: " + txn ? . error ? . message ) ) ;
2022-04-07 14:22:36 -07:00
} ;
2022-06-08 16:36:22 +02:00
objStore . add ( this . generateLogEntry ( lines ) ) ;
2022-04-07 14:22:36 -07:00
const lastModStore = txn . objectStore ( "logslastmod" ) ;
lastModStore . put ( this . generateLastModifiedTime ( ) ) ;
} ) . then ( ( ) = > {
2023-06-30 16:43:28 +01:00
this . flushPromise = undefined ;
2022-04-07 14:22:36 -07:00
} ) ;
return this . flushPromise ;
2023-01-16 17:27:49 +00:00
} ;
2022-04-07 14:22:36 -07:00
/ * *
* Consume the most recent logs and return them . Older logs which are not
* returned are deleted at the same time , so this can be called at startup
* to do house - keeping to keep the logs from growing too large .
*
* @return { Promise < Object [ ] > } Resolves to an array of objects . The array is
* sorted in time ( oldest first ) based on when the log file was created ( the
* log ID ) . The objects have said log ID in an "id" field and "lines" which
* is a big string with all the new - line delimited logs .
* /
2022-06-11 14:28:30 +02:00
public async consume ( ) : Promise < LogEntry [ ] > {
2022-04-07 14:22:36 -07:00
const db = this . db ;
2023-06-30 16:43:28 +01:00
if ( ! db ) {
return Promise . reject ( new Error ( "No connected database" ) ) ;
}
2022-04-07 14:22:36 -07:00
// Returns: a string representing the concatenated logs for this ID.
// Stops adding log fragments when the size exceeds maxSize
2024-09-10 09:49:35 +02:00
async function fetchLogs ( id : string , maxSize : number ) : Promise < string > {
2023-06-30 16:43:28 +01:00
const objectStore = db !
2022-04-07 14:22:36 -07:00
. transaction ( "logs" , "readonly" )
. objectStore ( "logs" ) ;
return new Promise ( ( resolve , reject ) = > {
const query = objectStore
. index ( "id" )
. openCursor ( IDBKeyRange . only ( id ) , "prev" ) ;
let lines = "" ;
2023-09-22 18:05:13 -04:00
query . onerror = ( ) : void = > {
2023-06-30 16:43:28 +01:00
reject ( new Error ( "Query failed: " + query ? . error ? . message ) ) ;
2022-04-07 14:22:36 -07:00
} ;
2023-09-22 18:05:13 -04:00
query . onsuccess = ( ) : void = > {
2023-01-16 16:43:45 +00:00
const cursor = query . result ;
2022-04-07 14:22:36 -07:00
if ( ! cursor ) {
2022-06-11 14:28:30 +02:00
resolve ( lines ) ;
2022-04-07 14:22:36 -07:00
return ; // end of results
2022-02-04 16:55:57 -08:00
}
2022-04-07 14:22:36 -07:00
lines = cursor . value . lines + lines ;
if ( lines . length >= maxSize ) {
2022-06-11 14:28:30 +02:00
resolve ( lines ) ;
2022-04-07 14:22:36 -07:00
} else {
cursor . continue ( ) ;
2022-02-04 16:55:57 -08:00
}
2022-04-07 14:22:36 -07:00
} ;
} ) ;
}
2022-06-11 14:28:30 +02:00
2022-04-07 14:22:36 -07:00
// Returns: A sorted array of log IDs. (newest first)
2024-09-10 09:49:35 +02:00
async function fetchLogIds ( ) : Promise < string [ ] > {
2022-04-07 14:22:36 -07:00
// To gather all the log IDs, query for all records in logslastmod.
2023-06-30 16:43:28 +01:00
const o = db !
2022-04-07 14:22:36 -07:00
. transaction ( "logslastmod" , "readonly" )
. objectStore ( "logslastmod" ) ;
2022-06-11 14:28:30 +02:00
return selectQuery < { ts : number ; id : string } > ( o , undefined , ( cursor ) = > {
2022-04-07 14:22:36 -07:00
return {
2022-06-11 14:28:30 +02:00
id : cursor.value.id ,
ts : cursor.value.ts ,
2022-04-07 14:22:36 -07:00
} ;
} ) . then ( ( res ) = > {
// Sort IDs by timestamp (newest first)
return res
. sort ( ( a , b ) = > {
return b . ts - a . ts ;
} )
. map ( ( a ) = > a . id ) ;
} ) ;
}
2022-06-11 14:28:30 +02:00
2024-09-10 09:49:35 +02:00
async function deleteLogs ( id : number ) : Promise < void > {
2022-06-11 14:28:30 +02:00
return new Promise < void > ( ( resolve , reject ) = > {
2023-06-30 16:43:28 +01:00
const txn = db ! . transaction ( [ "logs" , "logslastmod" ] , "readwrite" ) ;
2022-04-07 14:22:36 -07:00
const o = txn . objectStore ( "logs" ) ;
// only load the key path, not the data which may be huge
const query = o . index ( "id" ) . openKeyCursor ( IDBKeyRange . only ( id ) ) ;
2023-09-22 18:05:13 -04:00
query . onsuccess = ( ) : void = > {
2023-01-16 16:43:45 +00:00
const cursor = query . result ;
2022-04-07 14:22:36 -07:00
if ( ! cursor ) {
return ;
}
o . delete ( cursor . primaryKey ) ;
cursor . continue ( ) ;
} ;
2023-09-22 18:05:13 -04:00
txn . oncomplete = ( ) : void = > {
2022-04-07 14:22:36 -07:00
resolve ( ) ;
} ;
2023-09-22 18:05:13 -04:00
txn . onerror = ( ) : void = > {
2022-04-07 14:22:36 -07:00
reject (
new Error (
2023-10-11 10:42:04 -04:00
"Failed to delete logs for " + ` ' ${ id } ' : ${ txn ? . error ? . message } ` ,
) ,
2022-02-07 15:24:43 -08:00
) ;
2022-04-07 14:22:36 -07:00
} ;
// delete last modified entries
const lastModStore = txn . objectStore ( "logslastmod" ) ;
lastModStore . delete ( id ) ;
} ) ;
}
const allLogIds = await fetchLogIds ( ) ;
2023-06-30 16:43:28 +01:00
let removeLogIds : number [ ] = [ ] ;
2022-06-11 14:28:30 +02:00
const logs : LogEntry [ ] = [ ] ;
2022-04-07 14:22:36 -07:00
let size = 0 ;
for ( let i = 0 ; i < allLogIds . length ; i ++ ) {
2022-06-11 14:28:30 +02:00
const lines = await fetchLogs ( allLogIds [ i ] , MAX_LOG_SIZE - size ) ;
2022-04-07 14:22:36 -07:00
// always add the log file: fetchLogs will truncate once the maxSize we give it is
// exceeded, so we'll go over the max but only by one fragment's worth.
logs . push ( {
lines : lines ,
id : allLogIds [ i ] ,
} ) ;
size += lines . length ;
// If fetchLogs truncated we'll now be at or over the size limit,
// in which case we should stop and remove the rest of the log files.
if ( size >= MAX_LOG_SIZE ) {
// the remaining log IDs should be removed. If we go out of
// bounds this is just []
2023-06-30 16:43:28 +01:00
removeLogIds = allLogIds . slice ( i + 1 ) . map ( ( id ) = > parseInt ( id , 10 ) ) ;
2022-04-07 14:22:36 -07:00
break ;
}
}
if ( removeLogIds . length > 0 ) {
2022-06-11 14:28:30 +02:00
logger . log ( "Removing logs: " , removeLogIds ) ;
2022-04-07 14:22:36 -07:00
// Don't await this because it's non-fatal if we can't clean up
// logs.
2024-09-10 09:49:35 +02:00
Promise . all ( removeLogIds . map ( async ( id ) = > deleteLogs ( id ) ) ) . then (
2022-04-07 14:22:36 -07:00
( ) = > {
2022-06-11 14:28:30 +02:00
logger . log ( ` Removed ${ removeLogIds . length } old logs. ` ) ;
2022-04-07 14:22:36 -07:00
} ,
( err ) = > {
2022-06-11 14:28:30 +02:00
logger . error ( err ) ;
2023-10-11 10:42:04 -04:00
} ,
2022-04-07 14:22:36 -07:00
) ;
}
return logs ;
}
2022-06-11 14:28:30 +02:00
private generateLogEntry ( lines : string ) : LogEntry {
2022-04-07 14:22:36 -07:00
return {
id : this.id ,
lines : lines ,
index : this.index ++ ,
} ;
}
2022-02-07 15:24:43 -08:00
2022-06-11 14:28:30 +02:00
private generateLastModifiedTime ( ) : { id : string ; ts : number } {
2022-04-07 14:22:36 -07:00
return {
id : this.id ,
ts : Date.now ( ) ,
} ;
}
}
2022-02-01 15:11:06 -08:00
2022-04-07 14:22:36 -07:00
/ * *
* Helper method to collect results from a Cursor and promiseify it .
* @param { ObjectStore | Index } store The store to perform openCursor on .
* @param { IDBKeyRange = } keyRange Optional key range to apply on the cursor .
* @param { Function } resultMapper A function which is repeatedly called with a
* Cursor .
* Return the data you want to keep .
* @return { Promise < T [ ] > } Resolves to an array of whatever you returned from
* resultMapper .
* /
2024-09-10 09:49:35 +02:00
async function selectQuery < T > (
2022-06-06 22:42:48 +02:00
store : IDBObjectStore ,
2023-06-30 16:43:28 +01:00
keyRange : IDBKeyRange | undefined ,
2023-10-11 10:42:04 -04:00
resultMapper : ( cursor : IDBCursorWithValue ) = > T ,
2022-06-11 14:28:30 +02:00
) : Promise < T [ ] > {
2022-04-07 14:22:36 -07:00
const query = store . openCursor ( keyRange ) ;
return new Promise ( ( resolve , reject ) = > {
2023-06-30 16:43:28 +01:00
const results : T [ ] = [ ] ;
2023-09-22 18:05:13 -04:00
query . onerror = ( ) : void = > {
2023-06-30 16:43:28 +01:00
reject ( new Error ( "Query failed: " + query ? . error ? . message ) ) ;
2022-04-07 14:22:36 -07:00
} ;
// collect results
2023-09-22 18:05:13 -04:00
query . onsuccess = ( ) : void = > {
2023-01-16 16:43:45 +00:00
const cursor = query . result ;
2022-04-07 14:22:36 -07:00
if ( ! cursor ) {
resolve ( results ) ;
return ; // end of results
2022-02-01 15:39:45 -08:00
}
2022-04-07 14:22:36 -07:00
results . push ( resultMapper ( cursor ) ) ;
cursor . continue ( ) ;
} ;
} ) ;
2022-02-04 16:55:57 -08:00
}
2022-06-06 22:42:48 +02:00
declare global {
    // The IndexedDB-backed store; unset in incognito/unsupported browsers.
    // eslint-disable-next-line no-var, camelcase
    var mx_rage_store: IndexedDBLogStore;
    // The in-memory console logger created by init().
    // eslint-disable-next-line no-var, camelcase
    var mx_rage_logger: ConsoleLogger;
    // Memoised result of tryInitStorage() so storage is only set up once.
    // eslint-disable-next-line no-var, camelcase
    var mx_rage_initStoragePromise: Promise<void> | undefined;
}
2022-02-04 16:55:57 -08:00
2022-04-07 14:22:36 -07:00
/ * *
* Configure rage shaking support for sending bug reports .
* Modifies globals .
* @param { boolean } setUpPersistence When true ( default ) , the persistence will
* be set up immediately for the logs .
* @return { Promise } Resolves when set up .
* /
2024-09-10 09:49:35 +02:00
export async function init ( ) : Promise < void > {
2022-04-07 14:22:36 -07:00
global . mx_rage_logger = new ConsoleLogger ( ) ;
2024-09-23 14:35:10 +01:00
// configure loglevel based loggers:
2024-09-11 14:02:59 +01:00
setLogExtension ( logger , global . mx_rage_logger . log ) ;
// these are the child/prefixed loggers we want to capture from js-sdk
// there doesn't seem to be an easy way to capture all children
[ "MatrixRTCSession" , "MatrixRTCSessionManager" ] . forEach ( ( loggerName ) = > {
setLogExtension ( logger . getChild ( loggerName ) , global . mx_rage_logger . log ) ;
} ) ;
2022-04-07 14:22:36 -07:00
2024-09-23 14:35:10 +01:00
// intercept console logging so that we can get matrix_sdk logs:
// this is nasty, but no logging hooks are provided
[
"trace" as const ,
"debug" as const ,
"info" as const ,
"warn" as const ,
"error" as const ,
] . forEach ( ( level ) = > {
const originalMethod = window . console [ level ] ;
if ( ! originalMethod ) return ;
const prefix = ` ${ level . toUpperCase ( ) } matrix_sdk ` ;
window . console [ level ] = ( . . . args ) : void = > {
originalMethod ( . . . args ) ;
// args for calls from the matrix-sdk-crypto-wasm look like:
// ["DEBUG matrix_sdk_indexeddb::crypto_store: IndexedDbCryptoStore: opening main store matrix-js-sdk::matrix-sdk-crypto\n at /home/runner/.cargo/git/checkouts/matrix-rust-sdk-1f4927f82a3d27bb/07aa6d7/crates/matrix-sdk-indexeddb/src/crypto_store/mod.rs:267"]
if ( typeof args [ 0 ] === "string" && args [ 0 ] . startsWith ( prefix ) ) {
// we pass all the args on to the logger in case there are more sent in future
global . mx_rage_logger . log ( LogLevel [ level ] , "matrix_sdk" , . . . args ) ;
}
} ;
} ) ;
2023-01-16 16:43:45 +00:00
return tryInitStorage ( ) ;
2022-02-04 16:55:57 -08:00
}
2022-04-07 14:22:36 -07:00
/ * *
* Try to start up the rageshake storage for logs . If not possible ( client unsupported )
* then this no - ops .
* @return { Promise } Resolves when complete .
* /
2024-09-10 09:49:35 +02:00
async function tryInitStorage ( ) : Promise < void > {
2022-04-07 14:22:36 -07:00
if ( global . mx_rage_initStoragePromise ) {
return global . mx_rage_initStoragePromise ;
}
logger . log ( "Configuring rageshake persistence..." ) ;
// just *accessing* indexedDB throws an exception in firefox with
// indexeddb disabled.
let indexedDB ;
try {
indexedDB = window . indexedDB ;
2024-09-03 17:14:27 +02:00
} catch ( e ) {
logger . warn ( "Could not get indexDB from window." , e ) ;
}
2022-04-07 14:22:36 -07:00
if ( indexedDB ) {
global . mx_rage_store = new IndexedDBLogStore (
indexedDB ,
2023-10-11 10:42:04 -04:00
global . mx_rage_logger ,
2022-04-07 14:22:36 -07:00
) ;
global . mx_rage_initStoragePromise = global . mx_rage_store . connect ( ) ;
return global . mx_rage_initStoragePromise ;
}
global . mx_rage_initStoragePromise = Promise . resolve ( ) ;
return global . mx_rage_initStoragePromise ;
2022-02-04 16:55:57 -08:00
}
2022-04-07 14:22:36 -07:00
/ * *
* Get a recent snapshot of the logs , ready for attaching to a bug report
*
2022-06-08 16:36:22 +02:00
* @return { LogEntry [ ] } list of log data
2022-04-07 14:22:36 -07:00
* /
2022-06-08 16:36:22 +02:00
export async function getLogsForReport ( ) : Promise < LogEntry [ ] > {
2022-04-07 14:22:36 -07:00
if ( ! global . mx_rage_logger ) {
throw new Error ( "No console logger, did you forget to call init()?" ) ;
}
// If in incognito mode, store is null, but we still want bug report
// sending to work going off the in-memory console logs.
if ( global . mx_rage_store ) {
// flush most recent logs
await global . mx_rage_store . flush ( ) ;
2022-06-11 14:28:30 +02:00
return global . mx_rage_store . consume ( ) ;
2022-04-07 14:22:36 -07:00
} else {
return [
{
2023-01-16 16:43:45 +00:00
lines : global.mx_rage_logger.peekLogs ( ) ,
2022-04-07 14:22:36 -07:00
id : "-" ,
} ,
2022-06-11 14:28:30 +02:00
] ;
2022-04-07 14:22:36 -07:00
}
2022-02-01 15:11:06 -08:00
}
2022-06-11 14:28:30 +02:00
2022-06-11 14:32:25 +02:00
// Shape of a JSON.stringify replacer callback.
type StringifyReplacer = (
    this: unknown,
    key: string,
    value: unknown,
) => unknown;

// From https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Errors/Cyclic_object_value#circular_references
// Injects `<$ cycle-trimmed $>` wherever it cuts a cyclical object relationship
const getCircularReplacer = (): StringifyReplacer => {
    // Objects already visited during this stringify pass. NOTE(review): any
    // repeat visit is trimmed, so shared (non-cyclic) references are also
    // replaced, not just true cycles — this matches the MDN recipe.
    const seen = new WeakSet();
    // Nesting depth at which each object was first encountered, used to
    // prune anything deeper than DEPTH_LIMIT.
    const depthMap = new WeakMap<object, number>();
    return function (this: unknown, key: string, value: unknown): unknown {
        if (typeof value === "object" && value !== null) {
            if (seen.has(value)) {
                return "<$ cycle-trimmed $>";
            }
            seen.add(value);
            let depth = 0;
            // `this` is the object containing `value`, so its recorded depth
            // tells us how deep we currently are.
            if (this) {
                depth = depthMap.get(this) ?? 0;
            }
            depthMap.set(value, depth + 1);
            if (depth > DEPTH_LIMIT) return "<$ object-pruned $>";
        }
        return value;
    };
};
2023-09-25 18:04:34 +01:00
// Numeric log levels mirroring loglevel's ordering (trace lowest). The
// numeric values are load-bearing: setLogExtension compares them against
// the configured level to decide whether a message is forwarded.
enum LogLevel {
    trace = 0,
    debug = 1,
    info = 2,
    warn = 3,
    error = 4,
    silent = 5,
}
// Callback invoked with every captured log message and its level.
type LogExtensionFunc = (
    level: LogLevel,
    ...rest: (Error | DOMException | object | string)[]
) => void;
// The string names of the LogLevel members ("trace" | "debug" | ...).
type LogLevelString = keyof typeof LogLevel;
/**
 * This method borrowed from livekit (who also use loglevel and in turn essentially
 * took loglevel's example honouring log levels). Adds a loglevel logging extension
 * in the recommended way: wrap methodFactory so every produced log method also
 * forwards to `extension`, then re-apply the current level to rebuild methods.
 */
function setLogExtension(
    _loggerToExtend: Logger,
    extension: LogExtensionFunc,
): void {
    // The js-sdk Logger is a loglevel logger under the hood.
    const loggerToExtend = _loggerToExtend as unknown as loglevel.Logger;
    const originalFactory = loggerToExtend.methodFactory;
    loggerToExtend.methodFactory = function (
        methodName,
        configLevel,
        loggerName,
    ): LoggingMethod {
        const rawMethod = originalFactory(methodName, configLevel, loggerName);
        const logLevel = LogLevel[methodName as LogLevelString];
        // Only forward messages at/above the configured level and below silent.
        const needLog = logLevel >= configLevel && logLevel < LogLevel.silent;
        return (...args) => {
            // we don't send the logger name to the raw method as some of them are already outputting the prefix
            rawMethod.apply(this, args);
            if (needLog) {
                // we prefix the logger name to the extension
                // this makes sure that the rageshake contains the logger name
                extension(logLevel, loggerName?.toString(), ...args);
            }
        };
    };
    loggerToExtend.setLevel(loggerToExtend.getLevel()); // Be sure to call setLevel method in order to apply plugin
}