diff --git a/ScriptsMeasures/run_comparison.m b/ScriptsMeasures/run_comparison.m index 7eede324..439cb9aa 100644 --- a/ScriptsMeasures/run_comparison.m +++ b/ScriptsMeasures/run_comparison.m @@ -8,10 +8,16 @@ % sc = Optional argument specified as a vector of floats [0,Inf) of length 3 representing the score coefficient of each comparison model (Granger-causality, Logistic, Price Discovery). % When defined, comparison models with a coefficient equal to 0 are not computed. When left undefined, all the comparison models are computed and their scores are equally weighted. % lag_max = An integer [2,Inf) representing the maximum lag order to be evaluated for Granger-causality and Price Discovery models (optional, default=10). -% lag_sel = A string ('AIC', 'BIC', 'FPE' or 'HQIC') representing the lag order selection criteria for Granger-causality and Price Discovery models (optional, default='AIC'). +% lag_sel = A string representing the lag order selection criteria for Granger-causality and Price Discovery models (optional, default='AIC'): +% - 'AIC' for Akaike's Information Criterion. +% - 'BIC' for Bayesian Information Criterion. +% - 'FPE' for Final Prediction Error. +% - 'HQIC' for Hannan-Quinn Information Criterion. % gca = A float [0.01,0.10] representing the probability level of the F test critical value for the Granger-causality model (optional, default=0.01). % lma = A boolean that indicates whether to use the adjusted McFadden R2 for the Logistic model (optional, default=false). -% pdt = A string (either 'GG' for the Gonzalo-Granger component share or 'H' for the Hasbrouck information share) representing the type of metric to calculate for the Price Discovery model (optional, default='GG'). +% pdt = A string representing the type of metric to calculate for the Price Discovery model (optional, default='GG'): +% - 'GG' for Gonzalo-Granger Component Metric. +% - 'H' for Hasbrouck Information Metric. % analyze = A boolean that indicates whether to analyse the results and display plots (optional, default=false). 
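A note on the lag order selection criteria listed above: they are the standard information criteria for vector autoregressions, shared with the Granger-causality and Price Discovery models further down in this diff. The sketch below is a generic illustration of how a lag order could be chosen under them; it is not the repository's granger_causality or price_discovery implementation, and all variable names are hypothetical.

    % y is a t-by-2 matrix of observations, lag_max the largest order considered
    y = randn(500,2);
    lag_max = 10;

    [t,n] = size(y);
    crit = NaN(lag_max,4);

    for p = 1:lag_max
        z = ones(t-lag_max,1);

        for j = 1:p
            z = [z y(lag_max+1-j:t-j,:)];
        end

        y0 = y(lag_max+1:t,:);
        b = z \ y0;
        e = y0 - (z * b);

        te = size(y0,1);
        sigma = (e.' * e) ./ te;
        k = numel(b);
        m = size(z,2);

        crit(p,1) = log(det(sigma)) + ((2 * k) / te);                % AIC
        crit(p,2) = log(det(sigma)) + ((k * log(te)) / te);          % BIC
        crit(p,3) = det(sigma) * (((te + m) / (te - m))^n);          % FPE
        crit(p,4) = log(det(sigma)) + ((2 * k * log(log(te))) / te); % HQIC
    end

    [~,lag_aic] = min(crit(:,1));

The selected order is the one minimizing the chosen criterion; using the common estimation sample of t-lag_max observations keeps the criteria comparable across candidate lags.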
% % [OUTPUT] @@ -297,23 +303,7 @@ function temp = validate_template(temp) - if (exist(temp,'file') == 0) - error('The template file could not be found.'); - end - - if (ispc()) - [file_status,file_sheets,file_format] = xlsfinfo(temp); - - if (isempty(file_status) || ~strcmp(file_format,'xlOpenXMLWorkbook')) - error('The template file is not a valid Excel spreadsheet.'); - end - else - [file_status,file_sheets] = xlsfinfo(temp); - - if (isempty(file_status)) - error('The template file is not a valid Excel spreadsheet.'); - end - end + file_sheets = validate_xls(temp,'T'); if ((numel(file_sheets) ~= 1) || ~strcmp(file_sheets{1},'Scores')) error('The template must contain only one sheet named ''Scores''.'); diff --git a/ScriptsMeasures/run_component.m b/ScriptsMeasures/run_component.m index 0d8983c7..7407369b 100644 --- a/ScriptsMeasures/run_component.m +++ b/ScriptsMeasures/run_component.m @@ -574,25 +574,8 @@ function plot_pca(ds,id) function temp = validate_template(temp) - if (exist(temp,'file') == 0) - error('The template file could not be found.'); - end - - if (ispc()) - [file_status,file_sheets,file_format] = xlsfinfo(temp); - - if (isempty(file_status) || ~strcmp(file_format,'xlOpenXMLWorkbook')) - error('The dataset file is not a valid Excel spreadsheet.'); - end - else - [file_status,file_sheets] = xlsfinfo(temp); - - if (isempty(file_status)) - error('The dataset file is not a valid Excel spreadsheet.'); - end - end - sheets = {'CATFIN VaRs' 'Indicators' 'PCA Overall Explained' 'PCA Overall Coefficients' 'PCA Overall Scores'}; + file_sheets = validate_xls(temp,'T'); if (~all(ismember(sheets,file_sheets))) error(['The template must contain the following sheets: ' sheets{1} sprintf(', %s', sheets{2:end}) '.']); diff --git a/ScriptsMeasures/run_connectedness.m b/ScriptsMeasures/run_connectedness.m index ca3e73af..91d42893 100644 --- a/ScriptsMeasures/run_connectedness.m +++ b/ScriptsMeasures/run_connectedness.m @@ -592,25 +592,8 @@ function plot_centralities(ds,id) function temp = validate_template(temp) - if (exist(temp,'file') == 0) - error('The template file could not be found.'); - end - - if (ispc()) - [file_status,file_sheets,file_format] = xlsfinfo(temp); - - if (isempty(file_status) || ~strcmp(file_format,'xlOpenXMLWorkbook')) - error('The dataset file is not a valid Excel spreadsheet.'); - end - else - [file_status,file_sheets] = xlsfinfo(temp); - - if (isempty(file_status)) - error('The dataset file is not a valid Excel spreadsheet.'); - end - end - sheets = {'Indicators' 'Average Adjacency Matrix' 'Average Centrality Measures'}; + file_sheets = validate_xls(temp,'T'); if (~all(ismember(sheets,file_sheets))) error(['The template must contain the following sheets: ' sheets{1} sprintf(', %s', sheets{2:end}) '.']); diff --git a/ScriptsMeasures/run_cross_entropy.m b/ScriptsMeasures/run_cross_entropy.m index 01d8aeef..487dcf97 100644 --- a/ScriptsMeasures/run_cross_entropy.m +++ b/ScriptsMeasures/run_cross_entropy.m @@ -3,10 +3,16 @@ % temp = A string representing the full path to the Excel spreadsheet used as a template for the results file. % out = A string representing the full path to the Excel spreadsheet to which the results are written, eventually replacing the previous ones. % bw = An integer [21,252] representing the dimension of each rolling window (optional, default=252). -% sel = A string (either 'F' for firms or 'G' for groups) representing the time series selection method (optional, default='F'). 
+% sel = A string representing the time series selection method (optional, default='F'): +% - 'F' for firms. +% - 'G' for groups. % rr = A float [0,1] representing the recovery rate in case of default (optional, default=0.4). -% pw = A string (either 'A' for plain average or 'W' for progressive average) representing the probabilities of default weighting method (optional, default='W'). -% md = A string (either 'N' for normal or 'T' for Student's T) representing the multivariate distribution used by the CIMDO model (optional, default='N'). +% pw = A string representing the probabilities of default weighting method (optional, default='W'): +% - 'A' for plain average. +% - 'W' for progressive average. +% md = A string representing the multivariate distribution used by the CIMDO model (optional, default='N'): +% - 'N' for normal distribution. +% - 'T' for Student's T distribution. % analyze = A boolean that indicates whether to analyse the results and display plots (optional, default=false). % % [OUTPUT] @@ -930,25 +936,8 @@ function plot_function(subs,data) function temp = validate_template(temp) - if (exist(temp,'file') == 0) - error('The template file could not be found.'); - end - - if (ispc()) - [file_status,file_sheets,file_format] = xlsfinfo(temp); - - if (isempty(file_status) || ~strcmp(file_format,'xlOpenXMLWorkbook')) - error('The dataset file is not a valid Excel spreadsheet.'); - end - else - [file_status,file_sheets] = xlsfinfo(temp); - - if (isempty(file_status)) - error('The dataset file is not a valid Excel spreadsheet.'); - end - end - sheets = {'Indicators' 'Average DiDe' 'SI' 'SV' 'CoJPoDs'}; + file_sheets = validate_xls(temp,'T'); if (~all(ismember(sheets,file_sheets))) error(['The template must contain the following sheets: ' sheets{1} sprintf(', %s', sheets{2:end}) '.']); diff --git a/ScriptsMeasures/run_cross_quantilogram.m b/ScriptsMeasures/run_cross_quantilogram.m index ba9bee47..75a2ab74 100644 --- a/ScriptsMeasures/run_cross_quantilogram.m +++ b/ScriptsMeasures/run_cross_quantilogram.m @@ -5,7 +5,9 @@ % bw = An integer [21,252] representing the dimension of each rolling window (optional, default=252). % a = A float [0.01,0.10] representing the target quantile (optional, default=0.05). % lags = An integer [10,60] representing the maximum number of lags (optional, default=60). -% cim = A string (either 'SB' for stationary bootstrap or 'SN' for self-normalization) representing the computational approach of confidence intervals (optional, default='SB'). +% cim = A string representing the computational approach of confidence intervals (optional, default='SB'): +% - 'SB' for stationary bootstrap. +% - 'SN' for self-normalization. % cis = Optional argument representing the significance level of confidence intervals and whose value depends on the chosen computational approach: % - for stationary bootstrap cross-quantilograms, a float (0.0,0.1] (default=0.050); % - for self-normalization cross-quantilograms, a float {0.005;0.010;0.025;0.050;0.100} (default=0.050).
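The validate_template refactor applied throughout this diff replaces the duplicated exist/xlsfinfo checks with a single call to validate_xls(temp,'T'). The helper itself is not part of the patch, so the following is only a plausible reconstruction assembled from the removed code; the 'T'/'D' type switch and the exact error messages are assumptions.

    function file_sheets = validate_xls(file,type)

        if (strcmp(type,'T'))
            label = 'template';
        else
            label = 'dataset';
        end

        if (exist(file,'file') == 0)
            error(['The ' label ' file could not be found.']);
        end

        if (ispc())
            [file_status,file_sheets,file_format] = xlsfinfo(file);

            if (isempty(file_status) || ~strcmp(file_format,'xlOpenXMLWorkbook'))
                error(['The ' label ' file is not a valid Excel spreadsheet.']);
            end
        else
            [file_status,file_sheets] = xlsfinfo(file);

            if (isempty(file_status))
                error(['The ' label ' file is not a valid Excel spreadsheet.']);
            end
        end

    end

Centralizing the check also removes the copy-paste inconsistency of the old code, in which several template validators raised errors that mentioned the "dataset file".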
@@ -551,26 +553,9 @@ function plot_function(subs,data) function temp = validate_template(temp) - if (exist(temp,'file') == 0) - error('The template file could not be found.'); - end - - if (ispc()) - [file_status,file_sheets,file_format] = xlsfinfo(temp); - - if (isempty(file_status) || ~strcmp(file_format,'xlOpenXMLWorkbook')) - error('The template file is not a valid Excel spreadsheet.'); - end - else - [file_status,file_sheets] = xlsfinfo(temp); - - if (isempty(file_status)) - error('The template file is not a valid Excel spreadsheet.'); - end - end - sheets = {'Full From' 'Full To' 'Partial From' 'Partial To'}; - + file_sheets = validate_xls(temp,'T'); + if (~all(ismember(sheets,file_sheets))) error(['The template must contain the following sheets: ' sheets{1} sprintf(', %s',sheets{2:end}) '.']); end diff --git a/ScriptsMeasures/run_cross_sectional.m b/ScriptsMeasures/run_cross_sectional.m index 7f9059be..18689dcf 100644 --- a/ScriptsMeasures/run_cross_sectional.m +++ b/ScriptsMeasures/run_cross_sectional.m @@ -108,6 +108,11 @@ ds.SES(1:offset,i) = ses; ds.SRISK(1:offset,i) = srisk; + [caviar,~,ir_fm,ir_mf] = bivariate_caviar(r_i,ds.A); + ds.CAViaR(1:offset,i) = caviar; + ds.CAViaRIRFM{i} = ir_fm; + ds.CAViaRIRMF{i} = ir_mf; + if (getappdata(bar,'Stop')) stopped = true; break; @@ -154,15 +159,16 @@ if (analyze) safe_plot(@(id)plot_idiosyncratic_averages(ds,id)); - safe_plot(@(id)plot_sequence(ds,'Beta',id)); - safe_plot(@(id)plot_sequence(ds,'VaR',id)); - safe_plot(@(id)plot_sequence(ds,'ES',id)); + safe_plot(@(id)plot_sequence_other(ds,'Beta',id)); + safe_plot(@(id)plot_sequence_other(ds,'VaR',id)); + safe_plot(@(id)plot_sequence_other(ds,'ES',id)); safe_plot(@(id)plot_systemic_averages(ds,id)); - safe_plot(@(id)plot_sequence(ds,'CoVaR',id)); - safe_plot(@(id)plot_sequence(ds,'Delta CoVaR',id)); - safe_plot(@(id)plot_sequence(ds,'MES',id)); - safe_plot(@(id)plot_sequence(ds,'SES',id)); - safe_plot(@(id)plot_sequence(ds,'SRISK',id)); + safe_plot(@(id)plot_sequence_caviar(ds,id)); + safe_plot(@(id)plot_sequence_other(ds,'CoVaR',id)); + safe_plot(@(id)plot_sequence_other(ds,'Delta CoVaR',id)); + safe_plot(@(id)plot_sequence_other(ds,'MES',id)); + safe_plot(@(id)plot_sequence_other(ds,'SES',id)); + safe_plot(@(id)plot_sequence_other(ds,'SRISK',id)); safe_plot(@(id)plot_correlations(ds,id)); safe_plot(@(id)plot_rankings(ds,id)); end @@ -201,29 +207,35 @@ ses_label = [' (CAR=' num2str(ds.CAR * 100) '%)']; srisk_label = [' (D=' num2str(ds.D * 100) '%, CAR=' num2str(ds.CAR * 100) '%)']; - ds.LabelsMeasuresSimple = {'Beta' 'VaR' 'ES' 'CoVaR' 'Delta CoVaR' 'MES' 'SES' 'SRISK'}; - ds.LabelsMeasures = {'Beta' ['VaR' k_all_label] ['ES' k_all_label] ['CoVaR' k_all_label] ['Delta CoVaR' k_all_label] ['MES' k_all_label] ['SES' ses_label] ['SRISK' srisk_label]}; + ds.LabelsMeasuresSimple = {'Beta' 'VaR' 'ES' 'CAViaR' 'CoVaR' 'Delta CoVaR' 'MES' 'SES' 'SRISK'}; + ds.LabelsMeasures = {'Beta' ['VaR' k_all_label] ['ES' k_all_label] ['CAViaR' k_all_label] ['CoVaR' k_all_label] ['Delta CoVaR' k_all_label] ['MES' k_all_label] ['SES' ses_label] ['SRISK' srisk_label]}; ds.LabelsSheetsSimple = [ds.LabelsMeasuresSimple {'Averages'}]; ds.LabelsSheets = [ds.LabelsMeasures {'Averages'}]; ds.TargetLiabilities = lb; ds.TargetLiabilitiesRolled = lbr; + + m = numel(ds.LabelsMeasuresSimple); + ds.CAViaRIRFM = cell(m,1); + ds.CAViaRIRMF = cell(m,1); + ds.Beta = NaN(t,n); ds.VaR = NaN(t,n); ds.ES = NaN(t,n); + ds.CAViaR = NaN(t,n); ds.CoVaR = NaN(t,n); ds.DeltaCoVaR = NaN(t,n); ds.MES = NaN(t,n); ds.SES = NaN(t,n); 
ds.SRISK = NaN(t,n); - ds.Averages = NaN(t,8); + ds.Averages = NaN(t,m); - ds.RankingConcordance = NaN(8,8); - ds.RankingStability = NaN(1,8); + ds.RankingConcordance = NaN(m); + ds.RankingStability = NaN(1,m); - ds.ComparisonReferences = {'Averages' 4:8 strcat({'CS-'},strrep(ds.LabelsMeasuresSimple(4:end),'Delta ','D'))}; + ds.ComparisonReferences = {'Averages' 4:9 strcat({'CS-'},strrep(ds.LabelsMeasuresSimple(4:end),'Delta ','D'))}; end @@ -236,12 +248,13 @@ beta_avg = sum(ds.Beta .* weights,2,'omitnan'); var_avg = sum(ds.VaR .* weights,2,'omitnan'); es_avg = sum(ds.ES .* weights,2,'omitnan'); + caviar_avg = sum(ds.CAViaR .* weights,2,'omitnan'); covar_avg = sum(ds.CoVaR .* weights,2,'omitnan'); dcovar_avg = sum(ds.DeltaCoVaR .* weights,2,'omitnan'); mes_avg = sum(ds.MES .* weights,2,'omitnan'); ses_avg = sum(ds.SES .* weights,2,'omitnan'); srisk_avg = sum(ds.SRISK .* weights,2,'omitnan'); - ds.Averages = [beta_avg var_avg es_avg covar_avg dcovar_avg mes_avg ses_avg srisk_avg]; + ds.Averages = [beta_avg var_avg es_avg caviar_avg covar_avg dcovar_avg mes_avg ses_avg srisk_avg]; measures_len = numel(ds.LabelsMeasuresSimple); measures = cell(measures_len,1); @@ -470,43 +483,25 @@ function plot_idiosyncratic_averages(ds,id) function plot_systemic_averages(ds,id) - y_limits = zeros(5,2); + y_limits = zeros(6,2); - averages_quantile = ds.Averages(:,4:6); - y_limits(1:3,:) = repmat(plot_limits(averages_quantile,0.1),3,1); + averages_quantile = ds.Averages(:,4:7); + y_limits(1:4,:) = repmat(plot_limits(averages_quantile,0.1),4,1); - averages_volume = ds.Averages(:,7:8); - y_limits(4:5,:) = repmat(plot_limits(averages_volume,0.1),2,1); + averages_volume = ds.Averages(:,8:9); + y_limits(5:6,:) = repmat(plot_limits(averages_volume,0.1),2,1); - subplot_offsets = cell(5,1); - subplot_offsets{1} = [1 3 5]; - subplot_offsets{2} = [7 9 11]; - subplot_offsets{3} = [13 15 17]; - subplot_offsets{4} = [2 4 6 8]; - subplot_offsets{5} = [12 14 16 18]; + subplot_offsets = [1; 3; 5; 2; 4; 6]; f = figure('Name','Cross-Sectional Measures > Systemic Averages','Units','normalized','Position',[100 100 0.85 0.85],'Tag',id); - subs = gobjects(5,1); - height_delta = NaN; + subs = gobjects(6,1); - for i = 1:5 - sub = subplot(9,2,subplot_offsets{i}); + for i = 1:6 + sub = subplot(3,2,subplot_offsets(i)); plot(sub,ds.DatesNum,smooth_data(ds.Averages(:,i+3)),'Color',[0.000 0.447 0.741]); set(sub,'YLim',y_limits(i,:)); title(sub,ds.LabelsMeasures{i+3}); - - if (i == 1) - sub_position = get(sub,'Position'); - height_old = sub_position(4); - height_new = height_old * 0.8; - height_delta = height_old - height_new; - - set(sub,'Position',[sub_position(1) (sub_position(2) + height_delta) sub_position(3) (sub_position(4) - height_delta)]); - else - sub_position = get(sub,'Position'); - set(sub,'Position',[sub_position(1) (sub_position(2) + height_delta) sub_position(3) (sub_position(4) - height_delta)]); - end subs(i) = sub; end @@ -522,11 +517,11 @@ function plot_systemic_averages(ds,id) y_ticks = get(subs(1),'YTick'); y_tick_labels = arrayfun(@(x)sprintf('%.2f',x),y_ticks,'UniformOutput',false); - set(subs(1:3),'YTick',y_ticks,'YTickLabel',y_tick_labels); + set(subs(1:4),'YTick',y_ticks,'YTickLabel',y_tick_labels); - y_ticks = get(subs(4),'YTick'); + y_ticks = get(subs(5),'YTick'); y_tick_labels = arrayfun(@(x)sprintf('%.0f',x),y_ticks,'UniformOutput',false); - set(subs(4:5),'YTick',y_ticks,'YTickLabel',y_tick_labels); + set(subs(5:6),'YTick',y_ticks,'YTickLabel',y_tick_labels); figure_title('Systemic Averages'); @@ -695,7 
+690,99 @@ function plot_rankings(ds,id) end -function plot_sequence(ds,target,id) +function plot_sequence_caviar(ds,id) + + n = ds.N; + t = ds.T; + dn = ds.DatesNum; + mt = ds.MonthlyTicks; + + ts = smooth_data(ds.CAViaR); + + data = [repmat({dn},1,n); mat2cell(ts,t,ones(1,n)); repmat({1:200},1,n); ds.CAViaRIRFM.'; ds.CAViaRIRMF.']; + + [~,index] = ismember('CAViaR',ds.LabelsMeasuresSimple); + plots_title = cell(3,20); + plots_title(1,:) = repmat(ds.LabelsMeasures(index),1,n); + plots_title(2,:) = repmat({'Impulse Response - Firm on Market Shock'},1,n); + plots_title(3,:) = repmat({'Impulse Response - Market on Firm Shock'},1,n); + + x_limits = {[dn(1) dn(end)] [1 200] [1 200]}; + y_limits = {plot_limits(ts,0.1) [] []}; + + core = struct(); + + core.N = n; + core.Data = data; + core.Function = @(subs,data)plot_function(subs,data); + + core.OuterTitle = 'Cross-Sectional Measures > CAViaR Time Series'; + core.InnerTitle = 'CAViaR Time Series'; + core.SequenceTitles = ds.FirmNames; + + core.PlotsAllocation = [2 2]; + core.PlotsSpan = {[1 2] 3 4}; + core.PlotsTitle = plots_title; + + core.XDates = {mt [] []}; + core.XGrid = {true true true}; + core.XLabel = {[] [] []}; + core.XLimits = x_limits; + core.XRotation = {45 [] []}; + core.XTick = {[] [] []}; + core.XTickLabels = {[] [] []}; + + core.YGrid = {true true true}; + core.YLabel = {[] [] []}; + core.YLimits = y_limits; + core.YRotation = {[] [] []}; + core.YTick = {[] [] []}; + core.YTickLabels = {[] [] []}; + + sequential_plot(core,id); + + function plot_function(subs,data) + + x_caviar = data{1}; + caviar = data{2}; + + x_ir = data{3}; + ir_fm = data{4}; + ir_mf = data{5}; + + d = find(isnan(caviar),1,'first'); + + if (isempty(d)) + xd = []; + else + xd = x_caviar(d) - 1; + end + + plot(subs(1),x_caviar,caviar,'Color',[0.000 0.447 0.741]); + + if (~isempty(xd)) + hold(subs(1),'on'); + plot(subs(1),[xd xd],get(subs(1),'YLim'),'Color',[1 0.4 0.4]); + hold(subs(1),'off'); + end + + plot(subs(2),x_ir,ir_fm(:,1),'Color',[0.000 0.447 0.741]); + hold(subs(2),'on'); + plot(subs(2),x_ir,ir_fm(:,2),'Color',[1 0.4 0.4],'LineStyle','--'); + plot(subs(2),x_ir,ir_fm(:,3),'Color',[1 0.4 0.4],'LineStyle','--'); + hold(subs(2),'off'); + + plot(subs(3),x_ir,ir_mf(:,1),'Color',[0.000 0.447 0.741]); + hold(subs(3),'on'); + plot(subs(3),x_ir,ir_mf(:,2),'Color',[1 0.4 0.4],'LineStyle','--'); + plot(subs(3),x_ir,ir_mf(:,3),'Color',[1 0.4 0.4],'LineStyle','--'); + hold(subs(3),'off'); + + end + +end + +function plot_sequence_other(ds,target,id) n = ds.N; t = ds.T; @@ -782,26 +869,9 @@ function plot_function(subs,data) function temp = validate_template(temp) - if (exist(temp,'file') == 0) - error('The template file could not be found.'); - end - - if (ispc()) - [file_status,file_sheets,file_format] = xlsfinfo(temp); - - if (isempty(file_status) || ~strcmp(file_format,'xlOpenXMLWorkbook')) - error('The template file is not a valid Excel spreadsheet.'); - end - else - [file_status,file_sheets] = xlsfinfo(temp); - - if (isempty(file_status)) - error('The template file is not a valid Excel spreadsheet.'); - end - end + sheets = {'Beta' 'VaR' 'ES' 'CAViaR' 'CoVaR' 'Delta CoVaR' 'MES' 'SES' 'SRISK' 'Averages'}; + file_sheets = validate_xls(temp,'T'); - sheets = {'Beta' 'VaR' 'ES' 'CoVaR' 'Delta CoVaR' 'MES' 'SES' 'SRISK' 'Averages'}; - if (~all(ismember(sheets,file_sheets))) error(['The template must contain the following sheets: ' sheets{1} sprintf(', %s',sheets{2:end}) '.']); end diff --git a/ScriptsMeasures/run_default.m b/ScriptsMeasures/run_default.m index 
c9261b49..7db5b33a 100644 --- a/ScriptsMeasures/run_default.m +++ b/ScriptsMeasures/run_default.m @@ -3,7 +3,9 @@ % temp = A string representing the full path to the Excel spreadsheet used as a template for the results file. % out = A string representing the full path to the Excel spreadsheet to which the results are written, eventually replacing the previous ones. % bw = An integer [21,252] representing the dimension of each rolling window (optional, default=252). -% op = A string (either 'BSM' for Black-Scholes-Merton or 'GC' for Gram-Charlier) representing the option pricing model (optional, default='BSM'). +% op = A string representing the option pricing model (optional, default='BSM'): +% - 'BSM' for Black-Scholes-Merton. +% - 'GC' for Gram-Charlier. % lst = A float or a vector of floats (0,Inf) representing the long-term to short-term liabilities ratio(s) used to calculate D2C and D2D (optional, default=3). % car = A float [0.03,0.20] representing the capital adequacy ratio used to calculate the D2C (optional, default=0.08). % rr = A float [0,1] representing the recovery rate in case of default used to calculate the DIP (optional, default=0.45). @@ -235,12 +237,12 @@ if (analyze) safe_plot(@(id)plot_distances(ds,id)); - safe_plot(@(id)plot_sequence(ds,'D2D',true,id)); - safe_plot(@(id)plot_sequence(ds,'D2C',true,id)); + safe_plot(@(id)plot_sequence(ds,'D2D',id)); + safe_plot(@(id)plot_sequence(ds,'D2C',id)); safe_plot(@(id)plot_dip(ds,id)); safe_plot(@(id)plot_scca(ds,id)); - safe_plot(@(id)plot_sequence(ds,'SCCA EL',false,id)); - safe_plot(@(id)plot_sequence(ds,'SCCA CL',false,id)); + safe_plot(@(id)plot_sequence(ds,'SCCA EL',id)); + safe_plot(@(id)plot_sequence(ds,'SCCA CL',id)); safe_plot(@(id)plot_rankings(ds,id)); end @@ -672,18 +674,16 @@ function plot_scca(ds,id) end -function plot_sequence(ds,target,distance,id) +function plot_sequence(ds,target,id) + + is_distance = any(strcmp(target,{'D2C' 'D2D'})); n = ds.N; t = ds.T; dn = ds.DatesNum; mt = ds.MonthlyTicks; - if (distance) - ts = smooth_data(ds.(strrep(target,' ',''))); - else - ts = smooth_data(ds.(strrep(target,' ',''))); - end + ts = smooth_data(ds.(strrep(target,' ',''))); data = [repmat({dn},1,n); mat2cell(ts,t,ones(1,n))]; @@ -692,7 +692,7 @@ function plot_sequence(ds,target,distance,id) x_limits = [dn(1) dn(end)]; - if (distance) + if (is_distance) y_limits = plot_limits(ts,0.1,[],[],-1); else y_limits = plot_limits(ts,0.1); @@ -702,7 +702,7 @@ function plot_sequence(ds,target,distance,id) core.N = n; core.Data = data; - core.Function = @(subs,data)plot_function(subs,data,distance); + core.Function = @(subs,data)plot_function(subs,data,is_distance); core.OuterTitle = ['Default Measures > ' target ' Time Series']; core.InnerTitle = [target ' Time Series']; @@ -729,7 +729,7 @@ function plot_sequence(ds,target,distance,id) sequential_plot(core,id); - function plot_function(subs,data,distance) + function plot_function(subs,data,is_distance) x = data{1}; y = data{2}; @@ -744,7 +744,7 @@ function plot_function(subs,data,distance) plot(subs(1),x,y,'Color',[0.000 0.447 0.741]); - if (distance) + if (is_distance) hold(subs(1),'on'); plot(subs(1),x,zeros(numel(x),1),'Color',[1 0.4 0.4]); hold(subs(1),'off'); @@ -794,27 +794,10 @@ function plot_function(subs,data,distance) end -function out_temp = validate_template(out_temp) - - if (exist(out_temp,'file') == 0) - error('The template file could not be found.'); - end - - if (ispc()) - [file_status,file_sheets,file_format] = xlsfinfo(out_temp); - - if (isempty(file_status) || 
~strcmp(file_format,'xlOpenXMLWorkbook')) - error('The dataset file is not a valid Excel spreadsheet.'); - end - else - [file_status,file_sheets] = xlsfinfo(out_temp); - - if (isempty(file_status)) - error('The dataset file is not a valid Excel spreadsheet.'); - end - end +function temp = validate_template(temp) sheets = {'D2D' 'D2C' 'SCCA EL' 'SCCA CL' 'Indicators'}; + file_sheets = validate_xls(temp,'T'); if (~all(ismember(sheets,file_sheets))) error(['The template must contain the following sheets: ' sheets{1} sprintf(', %s',sheets{2:end}) '.']); diff --git a/ScriptsMeasures/run_liquidity.m b/ScriptsMeasures/run_liquidity.m index cda033b7..1917b9e0 100644 --- a/ScriptsMeasures/run_liquidity.m +++ b/ScriptsMeasures/run_liquidity.m @@ -5,7 +5,11 @@ % bwl = An integer [90,252] representing the dimension of the long bandwidth (optional, default=252). % bwm = An integer [21,90) representing the dimension of the medium bandwidth (optional, default=21). % bws = An integer [5,21) representing the dimension of the short bandwidth (optional, default=5). -% mem = A string ('B' for Baseline MEM, 'A' for Asymmetric MEM, 'P' for Asymmetric Power MEM, 'S' for Spline MEM) representing the MEM type used to calculate the ILLIQ (optional, default='B'). +% mem = A string representing the MEM type used to calculate the ILLIQ (optional, default='B'): +% - 'B' for Baseline MEM. +% - 'A' for Asymmetric MEM. +% - 'P' for Asymmetric Power MEM. +% - 'S' for Spline MEM. % w = An integer [500,Inf) representing the number of sweeps used to calculate the RIS (optional, default=500). % c = A float (0,Inf) representing the starting coefficient value used to calculate the RIS (optional, default=0.01). % s2 = A float (0,Inf) representing the starting variance of innovations used to calculate the RIS (optional, default=0.0004). 
@@ -654,26 +658,9 @@ function plot_function(subs,data) function temp = validate_template(temp) - if (exist(temp,'file') == 0) - error('The template file could not be found.'); - end - - if (ispc()) - [file_status,file_sheets,file_format] = xlsfinfo(temp); - - if (isempty(file_status) || ~strcmp(file_format,'xlOpenXMLWorkbook')) - error('The template file is not a valid Excel spreadsheet.'); - end - else - [file_status,file_sheets] = xlsfinfo(temp); - - if (isempty(file_status)) - error('The template file is not a valid Excel spreadsheet.'); - end - end - sheets = {'HHLR' 'ILLIQ' 'ILLIQC' 'RIS' 'TR' 'VR' 'Averages'}; - + file_sheets = validate_xls(temp,'T'); + if (~all(ismember(sheets,file_sheets))) error(['The template must contain the following sheets: ' sheets{1} sprintf(', %s',sheets{2:end}) '.']); end diff --git a/ScriptsMeasures/run_regime_switching.m b/ScriptsMeasures/run_regime_switching.m index d32f512a..b568c90b 100644 --- a/ScriptsMeasures/run_regime_switching.m +++ b/ScriptsMeasures/run_regime_switching.m @@ -847,26 +847,9 @@ function plot_function(subs,data,k) function temp = validate_template(temp) - if (exist(temp,'file') == 0) - error('The template file could not be found.'); - end - - if (ispc()) - [file_status,file_sheets,file_format] = xlsfinfo(temp); - - if (isempty(file_status) || ~strcmp(file_format,'xlOpenXMLWorkbook')) - error('The template file is not a valid Excel spreadsheet.'); - end - else - [file_status,file_sheets] = xlsfinfo(temp); - - if (isempty(file_status)) - error('The template file is not a valid Excel spreadsheet.'); - end - end - sheets = {'Indicators' 'RS2 CM' 'RS2 CV' 'RS2 SP' 'RS3 CM' 'RS3 CV' 'RS3 SP' 'RS4 CM' 'RS4 CV' 'RS4 SP'}; - + file_sheets = validate_xls(temp,'T'); + if (~all(ismember(sheets,file_sheets))) error(['The template must contain the following sheets: ' sheets{1} sprintf(', %s',sheets{2:end}) '.']); end diff --git a/ScriptsMeasures/run_spillover.m b/ScriptsMeasures/run_spillover.m index 5b730149..5ab0d56d 100644 --- a/ScriptsMeasures/run_spillover.m +++ b/ScriptsMeasures/run_spillover.m @@ -4,7 +4,9 @@ % out = A string representing the full path to the Excel spreadsheet to which the results are written, eventually replacing the previous ones. % bw = An integer [21,252] representing the dimension of each rolling window (optional, default=252). % bws = An integer [1,10] representing the number of steps between each rolling window (optional, default=10). -% fevd = A string (either 'G' for generalized or 'O' for orthogonal) representing the FEVD type used by the variance decomposition (optional, default='G'). +% fevd = A string representing the FEVD type used by the variance decomposition (optional, default='G'): +% - 'G' for generalized FEVD. +% - 'O' for orthogonal FEVD. % lags = An integer [1,3] representing the number of lags of the VAR model used by the variance decomposition (optional, default=2). % h = An integer [1,10] representing the prediction horizon used by the variance decomposition (optional, default=4). % analyze = A boolean that indicates whether to analyse the results and display plots (optional, default=false). 
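The From, To and Net series produced by run_spillover.m are the usual Diebold-Yilmaz aggregates of the variance decomposition computed below. As a hedged illustration of how they relate to a row-normalized decomposition matrix (a generic sketch with toy numbers, not the repository's exact code):

    % vd is an n-by-n variance decomposition whose rows sum to 1
    vd = [0.70 0.20 0.10; 0.15 0.80 0.05; 0.10 0.25 0.65];
    n = size(vd,1);

    vd_od = vd - diag(diag(vd));   % off-diagonal contributions only

    from_others = sum(vd_od,2);    % spillovers received by each series
    to_others = sum(vd_od,1).';    % spillovers transmitted by each series
    net = to_others - from_others; % positive for net transmitters, negative for net receivers

    si = sum(vd_od(:)) / n;        % total spillover index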
@@ -529,25 +531,8 @@ function plot_function(subs,data) function temp = validate_template(temp) - if (exist(temp,'file') == 0) - error('The template file could not be found.'); - end - - if (ispc()) - [file_status,file_sheets,file_format] = xlsfinfo(temp); - - if (isempty(file_status) || ~strcmp(file_format,'xlOpenXMLWorkbook')) - error('The dataset file is not a valid Excel spreadsheet.'); - end - else - [file_status,file_sheets] = xlsfinfo(temp); - - if (isempty(file_status)) - error('The dataset file is not a valid Excel spreadsheet.'); - end - end - sheets = {'From' 'To' 'Net' 'Indicators'}; + file_sheets = validate_xls(temp,'T'); if (~all(ismember(sheets,file_sheets))) error(['The template must contain the following sheets: ' sheets{1} sprintf(', %s',sheets{2:end}) '.']); diff --git a/ScriptsModels/bivariate_caviar.m b/ScriptsModels/bivariate_caviar.m new file mode 100644 index 00000000..c94b4e36 --- /dev/null +++ b/ScriptsModels/bivariate_caviar.m @@ -0,0 +1,330 @@ +% [INPUT] +% r = A float t-by-2 matrix (-Inf,Inf) representing the logarithmic returns, in which the first column represents the market returns and the second column represents the firm returns. +% a = A float [0.01,0.10] representing the target quantile. +% +% [OUTPUT] +% caviar = A column vector of floats [0,Inf) of length t representing the Conditional Autoregressive Value at Risk. +% beta = A column vector of floats (-Inf,Inf) of length 10 representing the model coefficients. +% ir_fm = A float 200-by-3 matrix (-Inf,Inf) representing the impulse response of the firm against a shock of the market, where the second column is the lower bound and the third column is the upper bound. +% ir_mf = A float 200-by-3 matrix (-Inf,Inf) representing the impulse response of the market against a shock of the firm, where the second column is the lower bound and the third column is the upper bound. +% se = A column vector of floats [0,Inf) of length 10 representing model standard errors. +% stats = A row vector of floats [0,Inf) of length 2 representing model error statistics, where the first element is the critical value and the second element is the p-value.
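Given the interface documented above, a minimal usage sketch of the new function could look as follows; the returns are simulated purely for illustration, and the output combinations follow the nargout checks inside the function (requesting exactly three or exactly five outputs is rejected).

    r = 0.01 .* randn(500,2);                          % [market firm] logarithmic returns
    a = 0.05;                                          % target quantile

    [caviar,beta] = bivariate_caviar(r,a);             % quantile series and coefficients only
    [caviar,beta,ir_fm,ir_mf] = bivariate_caviar(r,a); % adds the 200-step impulse responses
    [caviar,beta,ir_fm,ir_mf,se,stats] = bivariate_caviar(r,a);

    figure();
    plot(1:200,ir_fm(:,1),'Color',[0.000 0.447 0.741]);
    hold on;
    plot(1:200,ir_fm(:,2),'Color',[1 0.4 0.4],'LineStyle','--');
    plot(1:200,ir_fm(:,3),'Color',[1 0.4 0.4],'LineStyle','--');
    hold off;
    title('Impulse Response - Firm on Market Shock');

This mirrors how run_cross_sectional.m consumes the function, storing the caviar series per firm and keeping the two impulse responses for plot_sequence_caviar.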
+ +function [caviar,beta,ir_fm,ir_mf,se,stats] = bivariate_caviar(varargin) + + persistent ip; + + if (isempty(ip)) + ip = inputParser(); + ip.addRequired('r',@(x)validateattributes(x,{'double'},{'real' 'finite' '2d' 'nonempty' 'size' [NaN 2]})); + ip.addRequired('a',@(x)validateattributes(x,{'double'},{'real' 'finite' '>=' 0.01 '<=' 0.10 'scalar'})); + end + + ip.parse(varargin{:}); + + ipr = ip.Results; + r = validate_input(ipr.r); + a = ipr.a; + + nargoutchk(2,6); + + switch (nargout) + case 3 + error('Both impulse response outputs must be assigned.'); + case 5 + error('Both standard error outputs must be assigned.'); + end + + cir = false; + cse = false; + + if (nargout >= 3) + cir = true; + + if (nargout >= 5) + cse = true; + end + end + + [caviar,beta,ir_fm,ir_mf,se,stats] = bivariate_caviar_internal(r,a,cir,cse); + +end + +function [caviar,beta,ir_fm,ir_mf,se,stats] = bivariate_caviar_internal(r,a,cir,cse) + + persistent options; + persistent um_beta0; + + if (isempty(options)) + options = optimset(optimset(@fminsearch),'Display','none','MaxFunEvals',1000,'MaxIter',1000,'TolFun',1e-8,'TolX',1e-8); + end + + if (isempty(um_beta0)) + rng_current = rng(); + rng(double(bitxor(uint16('T'),uint16('B')))); + cleanup = onCleanup(@()rng(rng_current)); + um_beta0 = unifrnd(0,1,[10000 3]); + end + + up = isempty(getCurrentTask()); + + c = zeros(3,2); + q = zeros(1,2); + + if (up) + if (size(r,1) >= 200) + qo = round(100 * a,0); + + parfor i = 1:2 + r_i = r(:,i); + rhs_i = sortrows(r_i(1:100),1,'ascend'); + q_i = rhs_i(qo); + + c(:,i) = univariate_model(r_i,q_i,a,um_beta0,options); + q(i) = q_i; + end + else + parfor i = 1:2 + r_i = r(:,i); + q_i = quantile(r_i,a); + + c(:,i) = univariate_model(r_i,q_i,a,um_beta0,options); + q(i) = q_i; + end + end + else + if (size(r,1) >= 200) + qo = round(100 * a,0); + + for i = 1:2 + r_i = r(:,i); + rhs_i = sortrows(r_i(1:100),1,'ascend'); + q_i = rhs_i(qo); + + c(:,i) = univariate_model(r_i,q_i,a,um_beta0); + q(i) = q_i; + end + else + for i = 1:2 + r_i = r(:,i); + q_i = quantile(r_i,a); + + c(:,i) = univariate_model(r_i,q_i,a,um_beta0); + q(i) = q_i; + end + end + end + + k1 = diag(c(2,:)); + k2 = diag(c(3,:)); + + beta0 = [c(1,:).'; k1(:); k2(:)]; + beta1 = fminsearch(@(x)objective(x,r,a,q),beta0,options); + beta = fminsearch(@(x)objective(x,r,a,q),beta1,options); + + [~,~,caviar_full] = objective(beta,r,a,q); + caviar = -1 .* min(caviar_full(:,2),0); + + if (cir) + [vc,se,stats] = standard_errors(r,a,beta,caviar_full); + ir_fm = impulse_response(r,beta,vc,true); + ir_mf = impulse_response(r,beta,vc,false); + + if (~cse) + se = []; + stats = []; + end + else + ir_fm = []; + ir_mf = []; + se = []; + stats = []; + end + +end + +function ir = impulse_response(r,beta,vc,mkt) + + if (mkt) + shock = [2; 0]; + else + shock = [0; 2]; + end + + c = cov(r); + cl = chol(c,'lower'); + + b = reshape(beta,2,5); + m1 = b(:,2:3); + m2 = b(:,4:end); + + ir_all = zeros(200,2); + ir_all(1,:) = m1 * abs(cl * shock); + + for i = 2:200 + ir_all(i,:) = m2 * ir_all(i-1,:).'; + end + + e2 = eye(2); + e4 = eye(4); + z42 = zeros(4,2); + z44 = zeros(4); + + da = [z42 e4 z44]; + db = [z42 z44 e4]; + + g1 = kron(abs(cl * shock).',e2) * da; + g2 = (m2 * g1) + kron((m1 * abs(cl * shock)).',e2) * db; + + se = zeros(200,2); + se(1,:) = diag(sqrt(g1 * vc * g1.')); + se(2,:) = diag(sqrt(g2 * vc * g2.')); + + for i = 3:200 + sb = zeros(4); + + for j = 0:i-2 + sb = sb + kron((m2.')^(i-2-j),m2^j); + end + + g = (m2^(i-1) * g1) + kron((m1 * abs(cl * shock)).',e2) * (sb * db); + se(i,:) = 
diag(sqrt(g * vc * g.')); + end + + ir_all_lb = ir_all - (2 .* se); + ir_all_ub = ir_all + (2 .* se); + + if (mkt) + ir = [ir_all(:,2) ir_all_lb(:,2) ir_all_ub(:,2)]; + else + ir = [ir_all(:,1) ir_all_lb(:,1) ir_all_ub(:,1)]; + end + +end + +function [rq,hits,caviar] = objective(beta,r,a,q) + + t = size(r,1); + + caviar = zeros(t,2); + caviar(1,:) = q; + + b = reshape(beta,2,5); + m0 = b(:,1).'; + m1 = b(:,2:3).'; + m2 = b(:,4:end).'; + + for t = 2:t + caviar(t,:) = m0 + (abs(r(t-1,:)) * m1) + (caviar(t-1,:) * m2); + end + + hits = (a * ones(t,2)) - (r < caviar); + rq = mean(sum((r - caviar) .* hits,2)); + + if (~isfinite(rq)) + rq = 1e100; + end + +end + +function [vc,se,stats] = standard_errors(r,a,beta,caviar) + + t = size(r,1); + + e2 = eye(2); + + m = reshape(beta,2,5); + dm1 = [e2 zeros(2,8)]; + dm2 = [zeros(4,2) eye(4) zeros(4)]; + dm3 = [zeros(4,6) eye(4)]; + + dq = zeros(2,10,t); + + for t = 2:t + dq(:,:,t) = dm1 + (kron(abs(r(t-1,:)),e2) * dm2) + (m(:,4:end) * dq(:,:,t-1)) + (kron(caviar(t-1,:),e2) * dm3); + end + + d = r - caviar; + + k = median(abs(d(:,1) - median(d(:,1)))); + h = t^(-1/3) * norminv(0.975)^(2/3) * ((1.5 * normpdf(norminv(a))^2) / ((2 * norminv(a)^2) + 1))^(1/3); + c = k * (norminv(a + h) - norminv(a - h)); + + q = zeros(10); + v = zeros(10); + + for t = 1:t + psi = a - (d(t,:) < 0).'; + eta = sum(reshape(dq(:,:,t),2,10) .* (psi * ones(1,10))); + + v = v + (eta.' * eta); + + qt = zeros(10); + + for j = 1:2 + dqt = reshape(dq(j,:,t),10,1); + qt = qt + ((abs(d(t,j)) < c) * (dqt * dqt.')); + end + + q = q + qt; + end + + q = q / (2 * c * t); + v = v / t; + vc = (q \ v / q) / t; + + r = [zeros(4,3), [e2; zeros(2)], zeros(4,2), [zeros(2); e2], zeros(4,1)]; + cv = ((r * beta).' / (r * vc * r.')) * (r * beta); + pval = 1 - chi2cdf(cv,4); + + se = sqrt(diag(vc)); + stats = [cv pval]; + +end + +function beta = univariate_model(r,q,a,beta0,options) + + w = size(beta0,1); + rq0 = zeros(w,1); + + for i = 1:w + [rq0(i),~,~] = univariate_model_objective(beta0(i,:).',r,a,q); + end + + m = [rq0 beta0]; + ms = sortrows(m,1); + beta1 = ms(1,2:end).'; + beta2 = fminsearch(@(x)univariate_model_objective(x,r,a,q),beta1,options); + beta = fminsearch(@(x)univariate_model_objective(x,r,a,q),beta2,options); + +end + +function [rq,hits,caviar] = univariate_model_objective(beta,r,a,q) + + t = numel(r); + + caviar = zeros(t,1); + caviar(1) = q; + + for t = 2:t + caviar(t) = beta(1) + (beta(2) * abs(r(t-1))) + (beta(3) * caviar(t-1)); + end + + hits = -((r < caviar) - a); + rq = hits.' * (r - caviar); + + if (~isfinite(rq)) + rq = 1e100; + end + +end + +function r = validate_input(r) + + t = size(r,1); + + if (t < 5) + error('The value of ''r'' is invalid. Expected input to be a matrix with at least 5 rows.'); + end + +end diff --git a/ScriptsModels/cimdo.m b/ScriptsModels/cimdo.m index b168643a..578b1729 100644 --- a/ScriptsModels/cimdo.m +++ b/ScriptsModels/cimdo.m @@ -1,7 +1,9 @@ % [INPUT] % r = A float t-by-n matrix representing the logarithmic returns. % pods = A vector of floats [0,1] of length n representing the probabilities of default. -% md = A string (either 'N' for normal or 'T' for Student's T) representing the multivariate distribution used by the model. +% md = A string representing the multivariate distribution used by the model: +% - 'N' for normal distribution. +% - 'T' for Student's T distribution. % % [OUTPUT] % g = An n^2-by-n matrix of numeric booleans representing the posterior density orthants. 
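CIMDO-style models calibrate a multivariate prior to the individual probabilities of default and then evaluate it over the joint default/survival regions. The sketch below is a textbook illustration of that prior step, assuming a standard normal prior with default thresholds implied by the PoDs; it is not the repository's cimdo implementation and every name in it is hypothetical.

    pods = [0.02 0.05 0.01];                 % marginal probabilities of default
    n = numel(pods);

    g = dec2bin(0:(2^n - 1)) - '0';          % joint default (1) / survival (0) orthants
    dt = norminv(pods);                      % default thresholds under the prior

    c = eye(n);                              % prior correlation, identity for illustration
    p_prior = zeros(2^n,1);

    for i = 1:2^n
        lo = -Inf(1,n);
        hi = Inf(1,n);

        lo(g(i,:) == 0) = dt(g(i,:) == 0);   % survival: return above the threshold
        hi(g(i,:) == 1) = dt(g(i,:) == 1);   % default: return below the threshold

        p_prior(i) = mvncdf(lo,hi,zeros(1,n),c);
    end

The posterior step of the actual model then tilts these prior orthant probabilities so that the implied marginals match the observed probabilities of default.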
diff --git a/ScriptsModels/component_metrics.m b/ScriptsModels/component_metrics.m index 8e4265a5..52d8c717 100644 --- a/ScriptsModels/component_metrics.m +++ b/ScriptsModels/component_metrics.m @@ -1,6 +1,6 @@ % [INPUT] % r = A float t-by-n matrix representing the logarithmic returns. -% f = A float [0.2,0.8] representing the percentage of components to include in the computation of the Absorption Ratio (optional, default=0.2). +% f = A float [0.2,0.8] representing the percentage of components to include in the computation of the Absorption Ratio. % % [OUTPUT] % ar = A float [0,1] representing the Absorption Ratio. @@ -14,7 +14,7 @@ if (isempty(ip)) ip = inputParser(); ip.addRequired('r',@(x)validateattributes(x,{'double'},{'real' '2d' 'nonempty'})); - ip.addOptional('f',0.2,@(x)validateattributes(x,{'double'},{'real' 'finite' '>=' 0.2 '<=' 0.8 'scalar'})); + ip.addRequired('f',@(x)validateattributes(x,{'double'},{'real' 'finite' '>=' 0.2 '<=' 0.8 'scalar'})); end ip.parse(varargin{:}); diff --git a/ScriptsModels/connectedness_indicators.m b/ScriptsModels/connectedness_indicators.m new file mode 100644 index 00000000..ba2b4c71 --- /dev/null +++ b/ScriptsModels/connectedness_indicators.m @@ -0,0 +1,106 @@ +% [INPUT] +% am = A binary n-by-n matrix representing the adjcency matrix. +% gd = A vector of integers [1,Inf) of length k representing the group delimiters (optional, default=[]). +% +% [OUTPUT] +% dci = A float [0,Inf) representing the Dynamic Causality Index. +% cio = A float [0,Inf) representing the "In & Out" connections. +% cioo = A float [0,Inf) representing the "In & Out - Other" connections if group delimiters are provided, NaN otherwise. + +function [dci,cio,cioo] = connectedness_metrics(varargin) + + persistent ip; + + if (isempty(ip)) + ip = inputParser(); + ip.addRequired('am',@(x)validateattributes(x,{'double'},{'real' 'finite' 'binary' '2d' 'square' 'nonempty'})); + ip.addOptional('gd',[],@(x)validateattributes(x,{'double'},{'real' 'finite' 'integer' 'positive' 'increasing'})); + end + + ip.parse(varargin{:}); + + ipr = ip.Results; + [am,gd] = validate_input(ipr.am,ipr.gd); + + nargoutchk(2,3); + + [dci,cio,cioo] = connectedness_metrics_internal(am,gd); + +end + +function [dci,cio,cioo] = connectedness_metrics_internal(am,gd) + + n = size(am,1); + + dci = sum(sum(am)) / ((n ^ 2) - n); + + ni = zeros(n,1); + no = zeros(n,1); + + for i = 1:n + ni(i) = sum(am(:,i)); + no(i) = sum(am(i,:)); + end + + cio = (sum(ni) + sum(no)) / (2 * (n - 1)); + + if (isempty(gd)) + cioo = NaN; + else + gd_len = length(gd); + + nifo = zeros(n,1); + noto = zeros(n,1); + + for i = 1:n + group_1 = gd(1); + group_n = gd(gd_len); + + if (i <= group_1) + g_beg = 1; + g_end = group_1; + elseif (i > group_n) + g_beg = group_n + 1; + g_end = n; + else + for j = 1:gd_len-1 + g_j0 = gd(j); + g_j1 = gd(j+1); + + if ((i > g_j0) && (i <= g_j1)) + g_beg = g_j0 + 1; + g_end = g_j1; + end + end + end + + nifo(i) = ni(i) - sum(am(g_beg:g_end,i)); + noto(i) = no(i) - sum(am(i,g_beg:g_end)); + end + + cioo = (sum(nifo) + sum(noto)) / (2 * gd_len * (n / gd_len)); + end + +end + +function [am,gd] = validate_input(am,gd) + + amv = am(:); + + if (numel(amv) < 4) + error('The value of ''am'' is invalid. Expected input to be a square matrix with a minimum size of 2x2.'); + end + + if (any((amv ~= 0) & (amv ~= 1))) + error('The value of ''am'' is invalid. Expected input to be a binary matrix.'); + end + + if (~isempty(gd)) + if (~isvector(gd) || (numel(gd) < 2)) + error('The value of ''gd'' is invalid. 
Expected input to be a vector containing at least 2 elements.'); + end + + gd = gd(:); + end + +end diff --git a/ScriptsModels/cross_quantilograms_sb.m b/ScriptsModels/cross_quantilograms_sb.m index f06cd60f..cc4a51ee 100644 --- a/ScriptsModels/cross_quantilograms_sb.m +++ b/ScriptsModels/cross_quantilograms_sb.m @@ -10,7 +10,7 @@ % ci = A row vector of floats (-Inf,Inf) of length 2 representing the lower and upper confidence intervals. % % [NOTES] -% The model computes partial cross-quantilograms when n is greater than 2 using exogenous variables from 2+1 to n. +% The model computes partial cross-quantilograms when n is greater than 2 using exogenous variables from 3 to n. function [cq,ci] = cross_quantilograms_sb(varargin) diff --git a/ScriptsModels/cross_quantilograms_sn.m b/ScriptsModels/cross_quantilograms_sn.m index 74cf2f46..5c1015a1 100644 --- a/ScriptsModels/cross_quantilograms_sn.m +++ b/ScriptsModels/cross_quantilograms_sn.m @@ -10,7 +10,7 @@ % ci = A row vector of floats (-Inf,Inf) of length 2 representing the lower and upper confidence intervals. % % [NOTES] -% The model computes partial cross-quantilograms when n is greater than 2 using exogenous variables from 2+1 to n. +% The model computes partial cross-quantilograms when n is greater than 2 using exogenous variables from 3 to n. function [cq,ci] = cross_quantilograms_sn(varargin) diff --git a/ScriptsModels/cross_sectional_metrics.m b/ScriptsModels/cross_sectional_metrics.m index a0918b58..6cb5d34f 100644 --- a/ScriptsModels/cross_sectional_metrics.m +++ b/ScriptsModels/cross_sectional_metrics.m @@ -1,5 +1,5 @@ % [INPUT] -% r = A float t-by-2 matrix representing the logarithmic returns, in which the first column represents the market returns and the second column represents the firm returns. +% r = A float t-by-2 matrix (-Inf,Inf) representing the logarithmic returns, in which the first column represents the market returns and the second column represents the firm returns. % cp = A vector of floats [0,Inf) of length t representing the market capitalization. % lb = A vector of floats [0,Inf) of length t representing the liabilities. % lbr = A vector of floats [0,Inf) of length t representing the forward-rolled liabilities. @@ -64,8 +64,8 @@ beta = rho .* (sf ./ sm); c = quantile((rf_0 ./ sf),a); - var = -1 * min(sf * c,0); - es = -1 * min(sf * -(normpdf(c) / a),0); + var = -1 .* min(sf * c,0); + es = -1 .* min(sf * -(normpdf(c) / a),0); [covar,dcovar] = calculate_covar(rm_0,rf_0,-var,sv,a); [mes,lrmes] = calculate_mes(rm_0,sm,rf_0,sf,rho,beta,a,d); @@ -92,8 +92,8 @@ dcovar = b(2) .* (var - repmat(median(rf_0),length(rm_0),1)); - covar = -1 * min(covar,0); - dcovar = -1 * min(dcovar,0); + covar = -1 .* min(covar,0); + dcovar = -1 .* min(dcovar,0); end @@ -115,7 +115,7 @@ k1 = sum(u .* f) ./ f_sum; k2 = sum(x .* f) ./ f_sum; - mes = -1 * min((sf .* rho .* k1) + (sf .* z .* k2),0); + mes = -1 .* min((sf .* rho .* k1) + (sf .* z .* k2),0); lrmes = 1 - exp(log(1 - d) .* beta); end diff --git a/ScriptsModels/distress_insurance_premium.m b/ScriptsModels/distress_insurance_premium.m index 15e0d9cf..3b4cf030 100644 --- a/ScriptsModels/distress_insurance_premium.m +++ b/ScriptsModels/distress_insurance_premium.m @@ -241,10 +241,10 @@ h = ((o.' * o) ./ rs(j)) + (eye(s) .* 1e-6); sigma(:,:,j) = h; - v = chol(h,'upper'); - q0 = v.' \ x0.'; + cu = chol(h,'upper'); + q0 = cu.' 
\ x0.'; q1 = dot(q0,q0,1); - nc = (s * log(2 * pi())) + (2 * sum(log(diag(v)))); + nc = (s * log(2 * pi())) + (2 * sum(log(diag(cu)))); rho(:,j) = (-(nc + q1) / 2) + log(weights(j)); end diff --git a/ScriptsModels/granger_causality.m b/ScriptsModels/granger_causality.m index 11401933..d08f31a9 100644 --- a/ScriptsModels/granger_causality.m +++ b/ScriptsModels/granger_causality.m @@ -2,7 +2,11 @@ % data = A float t-by-n matrix representing the model input; the performed test is aimed to assess whether the first observation is Granger-caused by the second one. % a = A float [0.01,0.10] representing the probability level of the F test critical value. % lag_max = An integer [2,Inf) representing the maximum lag order to be evaluated for both restricted and unrestricted models (optional, default=10). -% lag_sel = A string ('AIC', 'BIC', 'FPE' or 'HQIC') representing the lag order selection criteria (optional, default='AIC'). +% lag_sel = A string representing the lag order selection criteria (optional, default='AIC'): +% - 'AIC' for Akaike's Information Criterion. +% - 'BIC' for Bayesian Information Criterion. +% - 'FPE' for Final Prediction Error. +% - 'HQIC' for Hannan-Quinn Information Criterion. % % [OUTPUT] % f = A float (-Inf,Inf) representing the F test statistic. diff --git a/ScriptsModels/illiq_indicator.m b/ScriptsModels/illiq_indicator.m index ec85d00a..c5406f4f 100644 --- a/ScriptsModels/illiq_indicator.m +++ b/ScriptsModels/illiq_indicator.m @@ -3,7 +3,11 @@ % v = A vector of floats [0,Inf) of length t representing the trading volumes. % sv = A float t-by-k matrix (-Inf,Inf) representing the state variables. % bw = An integer [21,252] representing the dimension of each rolling window. -% mem = A string ('B' for Baseline MEM, 'A' for Asymmetric MEM, 'P' for Asymmetric Power MEM, 'S' for Spline MEM) representing the MEM type. +% mem = A string representing the MEM type: +% - 'B' for Baseline MEM. +% - 'A' for Asymmetric MEM. +% - 'P' for Asymmetric Power MEM. +% - 'S' for Spline MEM. % mag = An integer [1,Inf) obtained as 10^x representing the magnitude of logarithmic returns and trading volumes (optional, default=[]). % % [OUTPUT] @@ -22,7 +26,7 @@ ip.addRequired('sv',@(x)validateattributes(x,{'double'},{'real' 'finite'})); ip.addRequired('bw',@(x)validateattributes(x,{'double'},{'real' 'finite' 'integer' '>=' 21 '<=' 252 'scalar'})); ip.addRequired('mem',@(x)any(validatestring(x,{'A' 'B' 'P' 'S'}))); - ip.addRequired('mag',@(x)validateattributes([],{'double'},{'real' 'finite' 'integer'})); + ip.addOptional('mag',[],@(x)validateattributes(x,{'double'},{'real' 'finite' 'integer'})); end ip.parse(varargin{:}); diff --git a/ScriptsModels/kmv_structural.m b/ScriptsModels/kmv_structural.m index 25b455a8..ac00f586 100644 --- a/ScriptsModels/kmv_structural.m +++ b/ScriptsModels/kmv_structural.m @@ -3,7 +3,9 @@ % db = A float or a vector of floats [0,Inf) of length k representing the default barrier. % r = A float or a vector of floats (-Inf,Inf) of length k representing the annualized risk-free interest rate. % t = A float or a vector of floats (0,Inf) of length k representing the time to maturity of default barrier. -% op = A string (either 'BSM' for Black-Scholes-Merton or 'GC' for Gram-Charlier) representing the option pricing model used by the Systemic CCA framework (optional, default='BSM'). +% op = A string representing the option pricing model used by the Systemic CCA framework (optional, default='BSM'): +% - 'BSM' for Black-Scholes-Merton. +% - 'GC' for Gram-Charlier. 
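The 'BSM' option above refers to the Black-Scholes-Merton structural model on which run_default.m builds its distance measures. A generic reminder of the relation, with purely illustrative inputs (this is the textbook formula, not the repository's kmv_structural code):

    va = 120;                   % value of assets
    sa = 0.25;                  % annualized volatility of assets
    db = 80;                    % default barrier
    r = 0.02;                   % annualized risk-free interest rate
    t = 1;                      % time to maturity of the default barrier

    d1 = (log(va / db) + ((r + (0.5 * sa^2)) * t)) / (sa * sqrt(t));
    d2 = d1 - (sa * sqrt(t));   % distance to default
    pod = normcdf(-d2);         % implied (risk-neutral) probability of default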
% % [OUTPUT] % va = A column vector of floats of length k representing the value of assets. diff --git a/ScriptsModels/multiplicative_error.m b/ScriptsModels/multiplicative_error.m index 11ffc3e5..2c52836c 100644 --- a/ScriptsModels/multiplicative_error.m +++ b/ScriptsModels/multiplicative_error.m @@ -5,9 +5,13 @@ % * other columns: exogenous variables [0,Inf), optional; % - for Asymmetric MEM and Asymmetric Power MEM: % * first column: endogenous variable [0,Inf), mandatory; -% * second column: returns (-Inf,Inf), mandatory; +% * second column: logarithmic returns (-Inf,Inf), mandatory; % * other columns: exogenous variables [0,Inf), optional; -% type = A string ('B' for Baseline MEM, 'A' for Asymmetric MEM, 'P' for Asymmetric Power MEM, 'S' for Spline MEM) representing the model type. +% type = A string representing the model type: +% - 'B' for Baseline MEM. +% - 'A' for Asymmetric MEM. +% - 'P' for Asymmetric Power MEM. +% - 'S' for Spline MEM. % q = An integer [1,Inf) representing the first order of the model (optional, default=1). % p = An integer [1,Inf) representing the second order of the model (optional, default=1). % @@ -393,10 +397,6 @@ else d = sign(data); end - - if (~any(d)) - error('The value of ''data'' is invalid. Expected input to contain negative values in the second column.'); - end zn = n - 2; zo = 3; diff --git a/ScriptsModels/price_discovery.m b/ScriptsModels/price_discovery.m index 3d243bed..cbe662a7 100644 --- a/ScriptsModels/price_discovery.m +++ b/ScriptsModels/price_discovery.m @@ -1,8 +1,14 @@ % [INPUT] % data = A float t-by-n matrix representing the model input. -% type = A string (either 'GG' for the Gonzalo-Granger component metric or 'H' for the Hasbrouck information metric) representing the type of metric to calculate. +% type = A string representing the type of metric to calculate: +% - 'GG' for Gonzalo-Granger Component Metric. +% - 'H' for Hasbrouck Information Metric. % lag_max = An integer [2,t-2] representing the maximum lag order to be evaluated (optional, default=10). -% lag_sel = A string ('AIC', 'BIC', 'FPE' or 'HQIC') representing the lag order selection criteria (optional, default='AIC'). +% lag_sel = A string representing the lag order selection criteria (optional, default='AIC'): +% - 'AIC' for Akaike's Information Criterion. +% - 'BIC' for Bayesian Information Criterion. +% - 'FPE' for Final Prediction Error. +% - 'HQIC' for Hannan-Quinn Information Criterion. % % [OUTPUT] % m1 = A float [0,1] representing the first value of the metric. diff --git a/ScriptsModels/roll_implicit_spread.m b/ScriptsModels/roll_implicit_spread.m index 31893bb7..e29bb8bf 100644 --- a/ScriptsModels/roll_implicit_spread.m +++ b/ScriptsModels/roll_implicit_spread.m @@ -1,5 +1,5 @@ % [INPUT] -% p = A vector of floats (-Inf,Inf) of length t representing the log prices. +% p = A vector of floats (-Inf,Inf) of length t representing the prices. % bw = An integer [21,252] representing the dimension of each rolling window. % w = An integer [500,Inf) representing the number of sweeps (optional, default=1000). % c = A float (0,Inf) representing the starting coefficient value (optional, default=0.01). diff --git a/ScriptsModels/variance_decomposition.m b/ScriptsModels/variance_decomposition.m index e2e73234..1ebc600b 100644 --- a/ScriptsModels/variance_decomposition.m +++ b/ScriptsModels/variance_decomposition.m @@ -2,7 +2,9 @@ % data = A float t-by-n matrix representing the model input. 
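Returning to the multiplicative_error.m hunk above: the Baseline MEM referenced by type 'B' models a non-negative series x(t) as x(t) = mu(t) * eps(t), with mu(t) = omega + alpha * x(t-1) + beta * mu(t-1) and E[eps(t)] = 1. A minimal filtering sketch under assumed parameter values follows; it is not the repository's estimator, which fits the parameters rather than fixing them.

    x = abs(randn(500,1));                 % a non-negative endogenous series, simulated for illustration

    omega = 0.05;
    alpha = 0.20;
    beta = 0.75;

    t = numel(x);
    mu = zeros(t,1);
    mu(1) = mean(x);                       % initialize the conditional mean at the sample mean

    for i = 2:t
        mu(i) = omega + (alpha * x(i-1)) + (beta * mu(i-1));
    end

    ll = -sum(log(mu) + (x ./ mu));        % exponential quasi log-likelihood used for estimation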
% lags = An integer [1,3] representing the number of lags of the VAR model (optional, default=2). % h = An integer [1,10] representing the prediction horizon (optional, default=4). -% fevd = A string (either 'G' for generalized or 'O' for orthogonal) representing the FEVD type (optional, default='G'). +% fevd = A string representing the FEVD type (optional, default='G'): +% - 'G' for generalized FEVD. +% - 'O' for orthogonal FEVD. % % [OUTPUT] % vd = A float n-by-n matrix (-Inf,Inf) representing the variance decomposition. @@ -140,14 +142,14 @@ end else c = nearest_spd(c); - covariance_dec = chol(c,'lower'); + cl = chol(c,'lower'); for i = 1:n indices = zeros(n,1); indices(i,1) = 1; for j = 1:h - irf(j,:,i) = ma{j} * covariance_dec * indices; + irf(j,:,i) = ma{j} * cl * indices; end end end diff --git a/run.m b/run.m index 0ed14009..7cdfc1f1 100644 --- a/run.m +++ b/run.m @@ -115,7 +115,7 @@ end if (ds_process) - ds = parse_dataset(file,ds_version,'dd/mm/yyyy','QQ yyyy','P','R'); + ds = parse_dataset(file,ds_version,'dd/mm/yyyy','QQ yyyy','P','R',0.05); analyze_dataset(ds); save(mat,'ds'); end
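Closing note on the variance_decomposition.m hunk above: the renamed Cholesky factor cl is used to orthogonalize the VMA coefficients before building impulse responses, and the same ingredients yield the orthogonal FEVD that feeds the spillover measures. A self-contained sketch with toy inputs (generic, not the repository's exact routine):

    % ma is a cell array of n-by-n VMA coefficient matrices (ma{1} = eye(n)),
    % c is the n-by-n residual covariance; both would normally come from a fitted VAR
    c = [1.00 0.30; 0.30 1.50];
    ma = {eye(2) [0.50 0.10; 0.20 0.40] [0.25 0.09; 0.14 0.18]};

    n = size(c,1);
    h = numel(ma);

    cl = chol(c,'lower');
    fevd = zeros(n,n);

    for i = 1:n
        for j = 1:n
            num = 0;
            den = 0;

            for k = 1:h
                psi = ma{k} * cl;
                num = num + (psi(i,j)^2);       % contribution of shock j to variable i
                den = den + sum(psi(i,:).^2);   % total h-step forecast error variance of variable i
            end

            fevd(i,j) = num / den;
        end
    end

Each row of fevd sums to one; the generalized variant ('G') replaces the Cholesky factor with scaled columns of the covariance matrix and therefore requires an explicit row normalization afterwards.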