import torch

# The building blocks used below (head, the *_expansion, *_reconciliation, and
# *_remainder classes) are assumed to be provided by the surrounding package;
# import them from the modules that define them.


class svm_head(head):
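    """
    An SVM-style head: the choice of kernel selects the data expansion that
    realizes the kernel trick ('linear' keeps the input as-is, while the two
    RBF kernels expand it against a grid of radial basis centers).
    """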
    def __init__(
        self, m: int, n: int,
        name: str = 'svm_head',
        kernel: str = 'linear',  # 'linear', 'gaussian_rbf', or 'inverse_quadratic_rbf'
        base_range: tuple = (-1, 1),  # input range covered by the RBF centers
        num_interval: int = 10,  # number of RBF centers placed over base_range
        epsilon: float = 1.0,  # RBF shape parameter
        enable_bias: bool = False,
        # optional parameters
        with_lorr: bool = False,  # use low-rank (LoRR) parameter reconciliation
        r: int = 3,  # rank of the LoRR factorization
        with_residual: bool = False,  # add a linear residual (remainder) term
        channel_num: int = 1,
        with_batch_norm: bool = False,
        with_softmax: bool = False,
        # other parameters
        parameters_init_method: str = 'xavier_normal',
        device: str = 'cpu', *args, **kwargs
    ):
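        # Map the chosen kernel to the data expansion that implements it.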
if kernel == 'linear':
data_transformation = identity_expansion(
device=device,
)
elif kernel == 'gaussian_rbf':
data_transformation = gaussian_rbf_expansion(
base_range=base_range,
num_interval=num_interval,
epsilon=epsilon,
device=device,
)
elif kernel == 'inverse_quadratic_rbf':
data_transformation = inverse_quadratic_rbf_expansion(
base_range=base_range,
num_interval=num_interval,
epsilon=epsilon,
device=device,
)
        else:
            raise ValueError(
                f"kernel must be 'linear', 'gaussian_rbf' or 'inverse_quadratic_rbf', got '{kernel}'."
            )
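        # Parameter reconciliation: low-rank (LoRR) factorization or plain identity.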
if with_lorr:
parameter_fabrication = lorr_reconciliation(
r=r,
enable_bias=enable_bias,
device=device,
)
else:
parameter_fabrication = identity_reconciliation(
enable_bias=enable_bias,
device=device,
)
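        # Remainder term: a linear shortcut when residual connections are
        # enabled, otherwise a constant zero.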
if with_residual:
remainder = linear_remainder(
device=device
)
else:
remainder = zero_remainder(
device=device,
)
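        # Optional post-processing applied to the head's output.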
output_process_functions = []
if with_batch_norm:
output_process_functions.append(torch.nn.BatchNorm1d(num_features=n, device=device))
if with_softmax:
output_process_functions.append(torch.nn.Softmax(dim=-1))
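        # Assemble the head from the configured components.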
super().__init__(
m=m, n=n, name=name,
data_transformation=data_transformation,
parameter_fabrication=parameter_fabrication,
remainder=remainder,
output_process_functions=output_process_functions,
channel_num=channel_num,
parameters_init_method=parameters_init_method,
device=device, *args, **kwargs
)
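

# A minimal usage sketch (hypothetical: it assumes this file is importable and
# that the base `head` class makes instances callable on a batch of inputs):
#
#   head_module = svm_head(m=4, n=2, kernel='gaussian_rbf', device='cpu')
#   x = torch.rand(8, 4)   # batch of 8 samples with m=4 features
#   y = head_module(x)     # shape: (8, 2)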