In [1]:

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import normalize  # imported but never used below

data = np.loadtxt("ex1data2.txt", delimiter=',')
df = pd.DataFrame(data=data, columns=['x1', 'x2', 'x3'])
df[df['x1'] > 2000]        # boolean filter; the result is discarded (only the last expression displays)
df.loc[[2], ['x1']]        # list arguments keep the result as a one-cell DataFrame
Out[1]:

       x1
2  2400.0

In [2]:

df.loc[2].loc['x1']
Out[2]:
2400.0
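
Chained .loc calls work but do two separate lookups; a single .loc with both labels is the idiomatic equivalent (a minimal sketch against the df defined above):

df.loc[2, 'x1']   # same scalar, one indexing call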
In [3]:

inside = ['a', 'b', 'c', 'd']
outside = [1, 2, 3, 4]
bezo = list(zip(inside, outside))
bezo = pd.MultiIndex.from_tuples(bezo)
print(bezo)
MultiIndex(levels=[['a', 'b', 'c', 'd'], [1, 2, 3, 4]],
           labels=[[0, 1, 2, 3], [0, 1, 2, 3]])
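
A short sketch of what such a MultiIndex is typically used for: as the row index of a DataFrame, so you can select by outer level or by (outer, inner) pair. The random data and the column names 'p', 'q' here are purely illustrative:

hier_df = pd.DataFrame(np.random.randn(4, 2), index=bezo, columns=['p', 'q'])
hier_df.loc['a']        # all rows under outer label 'a'
hier_df.loc[('a', 1)]   # the single row ('a', 1)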
In [19]:

d = {'A': [1, 2, 4],
     'B': [4, 5, 6],
     'C': [7, 8, 9]}
d1 = {'A': [1, 2, 3],
      'R': [4523, 2335, 12356],
      'C': [7, 668, 669]}
df1 = pd.DataFrame(d)
df2 = pd.DataFrame(d1)
pd.merge(df1, df2, on=['A', 'C'])
Out[19]:

   A  B  C     R
0  1  4  7  4523
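
merge defaults to an inner join, so only the single row where both A and C agree (A=1, C=7) survives. An outer join over the same frames, as a sketch for contrast, keeps the unmatched rows from both sides and fills B and R with NaN where absent:

pd.merge(df1, df2, on=['A', 'C'], how='outer')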

In [5]:

pd.DataFrame.from_dict(d, orient='index')
Out[5]:
   0  1  2
A  1  2  3
B  4  5  6
C  7  8  9
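
With orient='index' the dict keys become the row labels; the default orient='columns' would keep them as column headers. A one-line sketch of the default for contrast:

pd.DataFrame.from_dict(d)   # keys 'A', 'B', 'C' become columns instead of rows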

In [8]:

# Data Preprocessing

# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd

# Importing the dataset
dataset = pd.read_csv('Data.csv')
X = dataset.iloc[:, :-1].values
y = dataset.iloc[:, 3].values

# Taking care of missing data
from sklearn.preprocessing import Imputer
imputer = Imputer(missing_values='NaN', strategy='mean', axis=0)
a = imputer.fit(X[:, 1:3])
type(a)
X[:, 1:3] = imputer.transform(a)   # BUG: `a` is the fitted Imputer object, not the data -- see the TypeError below
X = pd.DataFrame(X)
E:\cls python\Anaconda\envs\kirollos\lib\site-packages\sklearn\utils\deprecation.py:58: DeprecationWarning: Class Imputer is deprecated; Imputer was deprecated in version 0.20 and will be removed in 0.22. Import impute.SimpleImputer from sklearn instead.
  warnings.warn(msg, category=DeprecationWarning)
---------------------------------------------------------------------------
TypeError                                 Traceback (most recent call last)
<ipython-input-8-818852c2bd06> in <module>
     17 a = imputer.fit(X[:, 1:3])
     18 type(a)
---> 19 X[:, 1:3] = imputer.transform(a)
     20 X=pd.DataFrame(X)

E:\cls python\Anaconda\envs\kirollos\lib\site-packages\sklearn\preprocessing\imputation.py in transform(self, X)
    307         check_is_fitted(self, 'statistics_')
    308         X = check_array(X, accept_sparse='csc', dtype=FLOAT_DTYPES,
--> 309                         force_all_finite=False, copy=self.copy)
    310         statistics = self.statistics_
    311         if X.shape[1] != statistics.shape[0]:

E:\cls python\Anaconda\envs\kirollos\lib\site-packages\sklearn\utils\validation.py in check_array(array, accept_sparse, accept_large_sparse, dtype, order, copy, force_all_finite, ensure_2d, allow_nd, ensure_min_samples, ensure_min_features, warn_on_dtype, estimator)
    525     try:
    526         warnings.simplefilter('error', ComplexWarning)
--> 527         array = np.asarray(array, dtype=dtype, order=order)
    528     except ComplexWarning:
    529         raise ValueError("Complex data not supported\n"

E:\cls python\Anaconda\envs\kirollos\lib\site-packages\numpy\core\numeric.py in asarray(a, dtype, order)
    536
    537     """
--> 538     return array(a, dtype, copy=False, order=order)
    539
    540

TypeError: float() argument must be a string or a number, not 'Imputer'
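
The TypeError comes from handing the fitted Imputer itself to transform instead of the data slice, and the warning points at the modern replacement. A minimal sketch of both fixes with SimpleImputer (available from scikit-learn 0.20 on), assuming the same X as above:

from sklearn.impute import SimpleImputer
imputer = SimpleImputer(missing_values=np.nan, strategy='mean')
X[:, 1:3] = imputer.fit_transform(X[:, 1:3])   # transform the data, not the estimator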

In [7]:

type(a)
Out[7]:
sklearn.preprocessing.imputation.Imputer
In [9]:
le = preprocessing.LabelEncoder()
le.fit(["paris", "paris", "tokyo", "amsterdam"])

>>> list(le.classes_)
['amsterdam', 'paris', 'tokyo']
>>> le.transform(["tokyo", "tokyo", "paris"]) #doctest: +ELLIPSIS
array([2, 2, 1]...)
>>> list(le.inverse_transform([2, 2, 1]))
['tokyo', 'tokyo', 'paris']
File "<ipython-input-9-4b5fbd65babc>", line 7
array([2, 2, 1]...)
^
SyntaxError: invalid syntax
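
The cell above was pasted straight from the scikit-learn docs, so the bare doctest output line `array([2, 2, 1]...)` is not valid Python; `preprocessing` is also never imported. A runnable version of the same example:

from sklearn import preprocessing
le = preprocessing.LabelEncoder()
le.fit(["paris", "paris", "tokyo", "amsterdam"])
print(list(le.classes_))                            # ['amsterdam', 'paris', 'tokyo']
print(le.transform(["tokyo", "tokyo", "paris"]))    # [2 2 1]
print(list(le.inverse_transform([2, 2, 1])))        # ['tokyo', 'tokyo', 'paris']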

In [1]:

# Data Preprocessing

# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd

# Importing the dataset
dataset = pd.read_csv('Data.csv')
X = dataset.iloc[:, :-1].values
y = dataset.iloc[:, 3].values

# Taking care of missing data
from sklearn.preprocessing import Imputer
imputer = Imputer(missing_values='NaN', strategy='mean', axis=0)
imputer = imputer.fit(X[:, 1:3])
X[:, 1:3] = imputer.transform(X[:, 1:3])   # fixed: transform the data slice, not the estimator

# Encoding categorical data
# Encoding the Independent Variable
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
#labelencoder_X = LabelEncoder()
#X[:, 0] = labelencoder_X.fit_transform(X[:, 0])
onehotencoder = OneHotEncoder(categorical_features=[0])   # column 0 still holds raw country strings -- see the ValueError below
X = onehotencoder.fit_transform(X).toarray()
# Encoding the Dependent Variable
labelencoder_y = LabelEncoder()
y = labelencoder_y.fit_transform(y)
E:\cls python\Anaconda\envs\kirollos\lib\site-packages\sklearn\utils\deprecation.py:58: DeprecationWarning: Class Imputer is deprecated; Imputer was deprecated in version 0.20 and will be removed in 0.22. Import impute.SimpleImputer from sklearn instead.
  warnings.warn(msg, category=DeprecationWarning)
E:\cls python\Anaconda\envs\kirollos\lib\site-packages\sklearn\preprocessing\_encoders.py:392: DeprecationWarning: The 'categorical_features' keyword is deprecated in version 0.20 and will be removed in 0.22. You can use the ColumnTransformer instead.
  "use the ColumnTransformer instead.", DeprecationWarning)
---------------------------------------------------------------------------
ValueError                                Traceback (most recent call last)
<ipython-input-1-f0ce85662e31> in <module>
     23 #X[:, 0] = labelencoder_X.fit_transform(X[:, 0])
     24 onehotencoder = OneHotEncoder(categorical_features = [0])
---> 25 X = onehotencoder.fit_transform(X).toarray()
     26 # Encoding the Dependent Variable
     27 labelencoder_y = LabelEncoder()

E:\cls python\Anaconda\envs\kirollos\lib\site-packages\sklearn\preprocessing\_encoders.py in fit_transform(self, X, y)
    514             return _transform_selected(
    515                 X, self._legacy_fit_transform, self.dtype,
--> 516                 self._categorical_features, copy=True)
    517         else:
    518             return self.fit(X).transform(X)

E:\cls python\Anaconda\envs\kirollos\lib\site-packages\sklearn\preprocessing\base.py in _transform_selected(X, transform, dtype, selected, copy, retain_order)
     43     Xt : array or sparse matrix, shape=(n_samples, n_features_new)
     44     """
---> 45     X = check_array(X, accept_sparse='csc', copy=copy, dtype=FLOAT_DTYPES)
     46
     47     if sparse.issparse(X) and retain_order:

E:\cls python\Anaconda\envs\kirollos\lib\site-packages\sklearn\utils\validation.py in check_array(array, accept_sparse, accept_large_sparse, dtype, order, copy, force_all_finite, ensure_2d, allow_nd, ensure_min_samples, ensure_min_features, warn_on_dtype, estimator)
    525     try:
    526         warnings.simplefilter('error', ComplexWarning)
--> 527         array = np.asarray(array, dtype=dtype, order=order)
    528     except ComplexWarning:
    529         raise ValueError("Complex data not supported\n"

E:\cls python\Anaconda\envs\kirollos\lib\site-packages\numpy\core\numeric.py in asarray(a, dtype, order)
    536
    537     """
--> 538     return array(a, dtype, copy=False, order=order)
    539
    540

ValueError: could not convert string to float: 'France'
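
The ValueError appears because the LabelEncoder step is commented out, so column 0 still holds country strings when the legacy OneHotEncoder path tries to cast everything to float. The warning also says categorical_features is deprecated; a sketch of the recommended ColumnTransformer route (scikit-learn >= 0.20), which handles string columns directly:

from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import OneHotEncoder
ct = ColumnTransformer([('encoder', OneHotEncoder(), [0])], remainder='passthrough')
X = ct.fit_transform(X)   # column 0 one-hot encoded, numeric columns passed through unchanged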

In [1]:

s='ahmed'
len(s)
Out[1]:
5
In [5]:

import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.preprocessing import OneHotEncoder

data = np.loadtxt('iris.txt', delimiter=',')
x = data[:, :-1]
print(x.shape)
y = data[:, -1]
y = y.reshape((-1, 1))
onehotencoder = OneHotEncoder(categorical_features=[0])
y = onehotencoder.fit_transform(y).toarray()

x = np.concatenate((np.ones((150, 1)), x))   # BUG: default axis=0 stacks rows, not columns -- see the ValueError below
(150, 4)
E:\cls python\Anaconda\envs\kirollos\lib\site-packages\sklearn\preprocessing\_encoders.py:371: FutureWarning: The handling of integer data will change in version 0.22. Currently, the categories are determined based on the range [0, max(values)], while in the future they will be determined based on the unique values.
If you want the future behaviour and silence this warning, you can specify "categories='auto'".
In case you used a LabelEncoder before this OneHotEncoder to convert the categories to integers, then you can now use the OneHotEncoder directly.
  warnings.warn(msg, FutureWarning)
E:\cls python\Anaconda\envs\kirollos\lib\site-packages\sklearn\preprocessing\_encoders.py:392: DeprecationWarning: The 'categorical_features' keyword is deprecated in version 0.20 and will be removed in 0.22. You can use the ColumnTransformer instead.
  "use the ColumnTransformer instead.", DeprecationWarning)
---------------------------------------------------------------------------
ValueError                                Traceback (most recent call last)
<ipython-input-5-41f339a7e44a> in <module>
     12 y = onehotencoder.fit_transform(y).toarray()
     13
---> 14 x=np.concatenate((np.ones((150,1)),x))

ValueError: all the input array dimensions except for the concatenation axis must match exactly
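
np.concatenate joins along axis=0 by default, so a (150, 1) column of ones cannot be stacked onto the (150, 4) feature matrix row-wise. Passing axis=1 (or using np.c_, as the next cell does) is the fix:

x = np.concatenate((np.ones((150, 1)), x), axis=1)   # shape (150, 5)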

In [6]:

import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.preprocessing import OneHotEncoder
from sklearn.model_selection import train_test_split

data = np.loadtxt('iris.txt', delimiter=',')
x = data[:, :-1]
y = data[:, -1]
y = y.reshape((-1, 1))
onehotencoder = OneHotEncoder(categorical_features=[0])
y = onehotencoder.fit_transform(y).toarray()
x = np.c_[(np.ones((150, 1)), x)]
X_train, X_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=0)
alpha = 0.001
theta = np.array([0.5, 0.5, 0.5, 0.5, 0.5])
for i in range(100000):
    z = X_train.dot(theta.T)
    z.reshape(-1, 1)
    h = (1 / (1 + np.exp(-1 * z)))
    h = np.c_[(np.zeros((120, 2)), h)]            # BUG: zero-padding makes the shapes drift -- see the ValueError below
    j = (1 / (2 * 150) * (sum(h - y_train) ** 2))
    err = np.c_[np.zeros((120, 2)), h - y_train]
    theta = theta + (alpha) * (X_train.T.dot(err))
print(j)
E:\cls python\Anaconda\envs\kirollos\lib\site-packages\sklearn\preprocessing\_encoders.py:371: FutureWarning: The handling of integer data will change in version 0.22. Currently, the categories are determined based on the range [0, max(values)], while in the future they will be determined based on the unique values.
If you want the future behaviour and silence this warning, you can specify "categories='auto'".
In case you used a LabelEncoder before this OneHotEncoder to convert the categories to integers, then you can now use the OneHotEncoder directly.
  warnings.warn(msg, FutureWarning)
E:\cls python\Anaconda\envs\kirollos\lib\site-packages\sklearn\preprocessing\_encoders.py:392: DeprecationWarning: The 'categorical_features' keyword is deprecated in version 0.20 and will be removed in 0.22. You can use the ColumnTransformer instead.
  "use the ColumnTransformer instead.", DeprecationWarning)
---------------------------------------------------------------------------
ValueError                                Traceback (most recent call last)
<ipython-input-6-52825d3cc6a2> in <module>
     20 h=(1/(1+np.exp(-1*z)))
     21 h=np.c_[(np.zeros((120,2)),h)]
---> 22 j=(1/(2*150)*(sum(h-y_train)**2))
     23 err=np.c_[np.zeros((120,2)),h-y_train]
     24 theta=theta+(alpha)*(X_train.T.dot(err))

ValueError: operands could not be broadcast together with shapes (120,7) (120,3)
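
The zero-padding is what breaks on the second pass: the first update turns theta from shape (5,) into (5, 5), so h becomes (120, 7) and no longer matches y_train's (120, 3). For three one-hot classes the cleaner setup is a (5, 3) parameter matrix, one logistic column per class, with no padding at all. A minimal sketch of that one-vs-rest gradient descent, assuming X_train and y_train as built above (note the minus sign: descent subtracts the gradient, where the original cell added it; the squared-error cost follows the original cell, though logistic models are more often trained with cross-entropy):

m = X_train.shape[0]             # 120 training rows
theta = np.full((5, 3), 0.5)     # one parameter column per class
alpha = 0.001
for i in range(100000):
    h = 1 / (1 + np.exp(-X_train.dot(theta)))   # (120, 3) predicted probabilities
    err = h - y_train                            # (120, 3), same shape as the one-hot labels
    theta -= (alpha / m) * X_train.T.dot(err)    # (5, 3) gradient step
j = (1 / (2 * m)) * np.sum(err ** 2)             # final squared-error cost
print(j)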

In [5]:

import numpy as np
x = np.array([[23, 12, 646, 456], [346, 46, 321, 1, 61]])   # ragged rows (4 vs. 5 items) -> 1-D object array of lists
x.reshape((-1, 1))   # reshape returns a new array; this unassigned result is discarded
print(x)
[list([23, 12, 646, 456]) list([346, 46, 321, 1, 61])]
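
Two things go wrong above: the rows have different lengths, so NumPy falls back to a one-dimensional object array of lists (which reshape((-1, 1)) cannot turn into a numeric column), and reshape does not modify x in place, so its result vanishes. With equal-length rows and an assignment, both problems go away (a minimal sketch; the second row here drops the extra element for illustration):

x = np.array([[23, 12, 646, 456], [346, 46, 321, 61]])   # equal row lengths -> shape (2, 4)
x = x.reshape((-1, 1))   # keep the reshaped array by assigning it
print(x.shape)           # (8, 1)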
In [15]:

# Multiple Linear Regression

# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd

# Importing the dataset
dataset = pd.read_csv('50_Startups.csv')
X = dataset.iloc[:, :-1].values
y = dataset.iloc[:, 4].values

# Encoding categorical data
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
labelencoder = LabelEncoder()
X[:, 3] = labelencoder.fit_transform(X[:, 3])
onehotencoder = OneHotEncoder(categorical_features = [3])
X = onehotencoder.fit_transform(X).toarray()

# Avoiding the Dummy Variable Trap
X = X[:, 1:]

# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)

# Feature Scaling
"""from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
X_train = sc_X.fit_transform(X_train)
X_test = sc_X.transform(X_test)
sc_y = StandardScaler()
y_train = sc_y.fit_transform(y_train)"""

# Fitting Multiple Linear Regression to the Training set
from sklearn.linear_model import LinearRegression
regressor = LinearRegression()
regressor.fit(X_train, y_train)

# Predicting the Test set results
y_pred = regressor.predict(X_test)

# Building the optimal model using Backward Elimination
X = np.append(np.ones((50, 1)), values=X, axis=1)   # prepend the intercept column
import statsmodels.formula.api as sm

def backwardElimination(x, SL):
    numVars = len(x[0])
    temp = np.zeros((50, 6)).astype(int)
    for i in range(0, numVars):
        regressor_OLS = sm.OLS(y, x).fit()
        maxVar = max(regressor_OLS.pvalues).astype(float)
        print(regressor_OLS.summary())
        adjR_before = regressor_OLS.rsquared_adj.astype(float)
        if maxVar > SL:
            for j in range(0, numVars - i):
                if (regressor_OLS.pvalues[j].astype(float) == maxVar):
                    temp[:, j] = x[:, j]          # keep the dropped column in case of rollback
                    x = np.delete(x, j, 1)
                    tmp_regressor = sm.OLS(y, x).fit()
                    adjR_after = tmp_regressor.rsquared_adj.astype(float)
                    if (adjR_before >= adjR_after):   # adjusted R^2 got worse: restore the column
                        print(x.shape, temp.shape)
                        x_rollback = np.hstack((x, temp[:, [0, j]]))
                        print(x.shape, temp.shape, j)
                        print(x_rollback.shape)
                        x_rollback = np.delete(x_rollback, j, 1)
                        print(x_rollback.shape)
                        print(regressor_OLS.summary())
                        return x_rollback
                    else:
                        continue
    regressor_OLS.summary()
    print(x)
    return x

SL = 0.05
X_opt = X[:, [0, 1, 2, 3, 4, 5]]
X_Modeled = backwardElimination(X_opt, SL)
                            OLS Regression Results
==============================================================================
Dep. Variable:                      y   R-squared:                       0.951
Model:                            OLS   Adj. R-squared:                  0.945
Method:                 Least Squares   F-statistic:                     169.9
Date:                Fri, 24 May 2019   Prob (F-statistic):           1.34e-27
Time:                        15:57:08   Log-Likelihood:                -525.38
No. Observations:                  50   AIC:                             1063.
Df Residuals:                      44   BIC:                             1074.
Df Model:                           5
Covariance Type:            nonrobust
==============================================================================
                 coef    std err          t      P>|t|      [0.025      0.975]
------------------------------------------------------------------------------
const       5.013e+04   6884.820      7.281      0.000    3.62e+04     6.4e+04
x1           198.7888   3371.007      0.059      0.953   -6595.030    6992.607
x2           -41.8870   3256.039     -0.013      0.990   -6604.003    6520.229
x3             0.8060      0.046     17.369      0.000       0.712       0.900
x4            -0.0270      0.052     -0.517      0.608      -0.132       0.078
x5             0.0270      0.017      1.574      0.123      -0.008       0.062
==============================================================================
Omnibus:                       14.782   Durbin-Watson:                   1.283
Prob(Omnibus):                  0.001   Jarque-Bera (JB):               21.266
Skew:                          -0.948   Prob(JB):                     2.41e-05
Kurtosis:                       5.572   Cond. No.                     1.45e+06
==============================================================================

Warnings:
[1] Standard Errors assume that the covariance matrix of the errors is correctly specified.
[2] The condition number is large, 1.45e+06. This might indicate that there are
strong multicollinearity or other numerical problems.
                            OLS Regression Results
==============================================================================
Dep. Variable:                      y   R-squared:                       0.951
Model:                            OLS   Adj. R-squared:                  0.946
Method:                 Least Squares   F-statistic:                     217.2
Date:                Fri, 24 May 2019   Prob (F-statistic):           8.49e-29
Time:                        15:57:08   Log-Likelihood:                -525.38
No. Observations:                  50   AIC:                             1061.
Df Residuals:                      45   BIC:                             1070.
Df Model:                           4
Covariance Type:            nonrobust
==============================================================================
                 coef    std err          t      P>|t|      [0.025      0.975]
------------------------------------------------------------------------------
const       5.011e+04   6647.870      7.537      0.000    3.67e+04    6.35e+04
x1           220.1585   2900.536      0.076      0.940   -5621.821    6062.138
x2             0.8060      0.046     17.606      0.000       0.714       0.898
x3            -0.0270      0.052     -0.523      0.604      -0.131       0.077
x4             0.0270      0.017      1.592      0.118      -0.007       0.061
==============================================================================
Omnibus:                       14.758   Durbin-Watson:                   1.282
Prob(Omnibus):                  0.001   Jarque-Bera (JB):               21.172
Skew:                          -0.948   Prob(JB):                     2.53e-05
Kurtosis:                       5.563   Cond. No.                     1.40e+06
==============================================================================

Warnings:
[1] Standard Errors assume that the covariance matrix of the errors is correctly specified.
[2] The condition number is large, 1.4e+06. This might indicate that there are
strong multicollinearity or other numerical problems.
                            OLS Regression Results
==============================================================================
Dep. Variable:                      y   R-squared:                       0.951
Model:                            OLS   Adj. R-squared:                  0.948
Method:                 Least Squares   F-statistic:                     296.0
Date:                Fri, 24 May 2019   Prob (F-statistic):           4.53e-30
Time:                        15:57:08   Log-Likelihood:                -525.39
No. Observations:                  50   AIC:                             1059.
Df Residuals:                      46   BIC:                             1066.
Df Model:                           3
Covariance Type:            nonrobust
==============================================================================
                 coef    std err          t      P>|t|      [0.025      0.975]
------------------------------------------------------------------------------
const       5.012e+04   6572.353      7.626      0.000    3.69e+04    6.34e+04
x1             0.8057      0.045     17.846      0.000       0.715       0.897
x2            -0.0268      0.051     -0.526      0.602      -0.130       0.076
x3             0.0272      0.016      1.655      0.105      -0.006       0.060
==============================================================================
Omnibus:                       14.838   Durbin-Watson:                   1.282
Prob(Omnibus):                  0.001   Jarque-Bera (JB):               21.442
Skew:                          -0.949   Prob(JB):                     2.21e-05
Kurtosis:                       5.586   Cond. No.                     1.40e+06
==============================================================================

Warnings:
[1] Standard Errors assume that the covariance matrix of the errors is correctly specified.
[2] The condition number is large, 1.4e+06. This might indicate that there are
strong multicollinearity or other numerical problems.
                            OLS Regression Results
==============================================================================
Dep. Variable:                      y   R-squared:                       0.950
Model:                            OLS   Adj. R-squared:                  0.948
Method:                 Least Squares   F-statistic:                     450.8
Date:                Fri, 24 May 2019   Prob (F-statistic):           2.16e-31
Time:                        15:57:08   Log-Likelihood:                -525.54
No. Observations:                  50   AIC:                             1057.
Df Residuals:                      47   BIC:                             1063.
Df Model:                           2
Covariance Type:            nonrobust
==============================================================================
                 coef    std err          t      P>|t|      [0.025      0.975]
------------------------------------------------------------------------------
const       4.698e+04   2689.933     17.464      0.000    4.16e+04    5.24e+04
x1             0.7966      0.041     19.266      0.000       0.713       0.880
x2             0.0299      0.016      1.927      0.060      -0.001       0.061
==============================================================================
Omnibus:                       14.677   Durbin-Watson:                   1.257
Prob(Omnibus):                  0.001   Jarque-Bera (JB):               21.161
Skew:                          -0.939   Prob(JB):                     2.54e-05
Kurtosis:                       5.575   Cond. No.                     5.32e+05
==============================================================================

Warnings:
[1] Standard Errors assume that the covariance matrix of the errors is correctly specified.
[2] The condition number is large, 5.32e+05. This might indicate that there are
strong multicollinearity or other numerical problems.
(50, 2) (50, 6)
(50, 2) (50, 6) 2
(50, 4)
(50, 3)
                            OLS Regression Results
==============================================================================
Dep. Variable:                      y   R-squared:                       0.950
Model:                            OLS   Adj. R-squared:                  0.948
Method:                 Least Squares   F-statistic:                     450.8
Date:                Fri, 24 May 2019   Prob (F-statistic):           2.16e-31
Time:                        15:57:08   Log-Likelihood:                -525.54
No. Observations:                  50   AIC:                             1057.
Df Residuals:                      47   BIC:                             1063.
Df Model:                           2
Covariance Type:            nonrobust
==============================================================================
                 coef    std err          t      P>|t|      [0.025      0.975]
------------------------------------------------------------------------------
const       4.698e+04   2689.933     17.464      0.000    4.16e+04    5.24e+04
x1             0.7966      0.041     19.266      0.000       0.713       0.880
x2             0.0299      0.016      1.927      0.060      -0.001       0.061
==============================================================================
Omnibus:                       14.677   Durbin-Watson:                   1.257
Prob(Omnibus):                  0.001   Jarque-Bera (JB):               21.161
Skew:                          -0.939   Prob(JB):                     2.54e-05
Kurtosis:                       5.575   Cond. No.                     5.32e+05
==============================================================================

Warnings:
[1] Standard Errors assume that the covariance matrix of the errors is correctly specified.
[2] The condition number is large, 5.32e+05. This might indicate that there are
strong multicollinearity or other numerical problems.
E:\cls python\Anaconda\envs\kirollos\lib\site-packages\sklearn\preprocessing\_encoders.py:371: FutureWarning: The handling of integer data will change in version 0.22. Currently, the categories are determined based on the range [0, max(values)], while in the future they will be determined based on the unique values.
If you want the future behaviour and silence this warning, you can specify "categories='auto'".
In case you used a LabelEncoder before this OneHotEncoder to convert the categories to integers, then you can now use the OneHotEncoder directly.
  warnings.warn(msg, FutureWarning)
E:\cls python\Anaconda\envs\kirollos\lib\site-packages\sklearn\preprocessing\_encoders.py:392: DeprecationWarning: The 'categorical_features' keyword is deprecated in version 0.20 and will be removed in 0.22. You can use the ColumnTransformer instead.
  "use the ColumnTransformer instead.", DeprecationWarning)
In [8]:

x
Out[8]:
array([list([23, 12, 646, 456]), list([346, 46, 321, 1, 61])],
      dtype=object)
In [10]:

X_Modeled
Out[10]:
array([[1.0000000e+00, 1.6534920e+05, 4.7178400e+05],
[1.0000000e+00, 1.6259770e+05, 4.4389800e+05],
[1.0000000e+00, 1.5344151e+05, 4.0793400e+05],
[1.0000000e+00, 1.4437241e+05, 3.8319900e+05],
[1.0000000e+00, 1.4210734e+05, 3.6616800e+05],
[1.0000000e+00, 1.3187690e+05, 3.6286100e+05],
[1.0000000e+00, 1.3461546e+05, 1.2771600e+05],
[1.0000000e+00, 1.3029813e+05, 3.2387600e+05],
[1.0000000e+00, 1.2054252e+05, 3.1161300e+05],
[1.0000000e+00, 1.2333488e+05, 3.0498100e+05],
[1.0000000e+00, 1.0191308e+05, 2.2916000e+05],
[1.0000000e+00, 1.0067196e+05, 2.4974400e+05],
[1.0000000e+00, 9.3863750e+04, 2.4983900e+05],
[1.0000000e+00, 9.1992390e+04, 2.5266400e+05],
[1.0000000e+00, 1.1994324e+05, 2.5651200e+05],
[1.0000000e+00, 1.1452361e+05, 2.6177600e+05],
[1.0000000e+00, 7.8013110e+04, 2.6434600e+05],
[1.0000000e+00, 9.4657160e+04, 2.8257400e+05],
[1.0000000e+00, 9.1749160e+04, 2.9491900e+05],
[1.0000000e+00, 8.6419700e+04, 0.0000000e+00],
[1.0000000e+00, 7.6253860e+04, 2.9866400e+05],
[1.0000000e+00, 7.8389470e+04, 2.9973700e+05],
[1.0000000e+00, 7.3994560e+04, 3.0331900e+05],
[1.0000000e+00, 6.7532530e+04, 3.0476800e+05],
[1.0000000e+00, 7.7044010e+04, 1.4057400e+05],
[1.0000000e+00, 6.4664710e+04, 1.3796200e+05],
[1.0000000e+00, 7.5328870e+04, 1.3405000e+05],
[1.0000000e+00, 7.2107600e+04, 3.5318300e+05],
[1.0000000e+00, 6.6051520e+04, 1.1814800e+05],
[1.0000000e+00, 6.5605480e+04, 1.0713800e+05],
[1.0000000e+00, 6.1994480e+04, 9.1131000e+04],
[1.0000000e+00, 6.1136380e+04, 8.8218000e+04],
[1.0000000e+00, 6.3408860e+04, 4.6085000e+04],
[1.0000000e+00, 5.5493950e+04, 2.1463400e+05],
[1.0000000e+00, 4.6426070e+04, 2.1079700e+05],
[1.0000000e+00, 4.6014020e+04, 2.0551700e+05],
[1.0000000e+00, 2.8663760e+04, 2.0112600e+05],
[1.0000000e+00, 4.4069950e+04, 1.9702900e+05],
[1.0000000e+00, 2.0229590e+04, 1.8526500e+05],
[1.0000000e+00, 3.8558510e+04, 1.7499900e+05],
[1.0000000e+00, 2.8754330e+04, 1.7279500e+05],
[1.0000000e+00, 2.7892920e+04, 1.6447000e+05],
[1.0000000e+00, 2.3640930e+04, 1.4800100e+05],
[1.0000000e+00, 1.5505730e+04, 3.5534000e+04],
[1.0000000e+00, 2.2177740e+04, 2.8334000e+04],
[1.0000000e+00, 1.0002300e+03, 1.9030000e+03],
[1.0000000e+00, 1.3154600e+03, 2.9711400e+05],
[1.0000000e+00, 0.0000000e+00, 0.0000000e+00],
[1.0000000e+00, 5.4205000e+02, 0.0000000e+00],
[1.0000000e+00, 0.0000000e+00, 4.5173000e+04]])
